compiler, runtime: stop using __go_runtime_error
[official-gcc.git] / libgo / go / runtime / proc.go
blob a025137f367a10c41db33dabae43627990dea579
1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime
7 import (
8 "internal/cpu"
9 "runtime/internal/atomic"
10 "runtime/internal/sys"
11 "unsafe"
14 // Functions called by C code.
15 //go:linkname main
16 //go:linkname goparkunlock
17 //go:linkname newextram
18 //go:linkname acquirep
19 //go:linkname releasep
20 //go:linkname incidlelocked
21 //go:linkname ginit
22 //go:linkname schedinit
23 //go:linkname ready
24 //go:linkname stopm
25 //go:linkname handoffp
26 //go:linkname wakep
27 //go:linkname stoplockedm
28 //go:linkname schedule
29 //go:linkname execute
30 //go:linkname goexit1
31 //go:linkname reentersyscall
32 //go:linkname reentersyscallblock
33 //go:linkname exitsyscall
34 //go:linkname gfget
35 //go:linkname kickoff
36 //go:linkname mstart1
37 //go:linkname mexit
38 //go:linkname globrunqput
39 //go:linkname pidleget
41 // Exported for test (see runtime/testdata/testprogcgo/dropm_stub.go).
42 //go:linkname getm
44 // Function called by misc/cgo/test.
45 //go:linkname lockedOSThread
47 // C functions for thread and context management.
48 func newosproc(*m)
50 //go:noescape
51 func malg(bool, bool, *unsafe.Pointer, *uintptr) *g
53 //go:noescape
54 func resetNewG(*g, *unsafe.Pointer, *uintptr)
55 func gogo(*g)
56 func setGContext()
57 func makeGContext(*g, unsafe.Pointer, uintptr)
58 func getTraceback(me, gp *g)
59 func gtraceback(*g)
60 func _cgo_notify_runtime_init_done()
61 func alreadyInCallers() bool
62 func stackfree(*g)
64 // Functions created by the compiler.
65 //extern __go_init_main
66 func main_init()
68 //extern main.main
69 func main_main()
71 var buildVersion = sys.TheVersion
73 // set using cmd/go/internal/modload.ModInfoProg
74 var modinfo string
76 // Goroutine scheduler
77 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
79 // The main concepts are:
80 // G - goroutine.
81 // M - worker thread, or machine.
82 // P - processor, a resource that is required to execute Go code.
83 // M must have an associated P to execute Go code, however it can be
84 // blocked or in a syscall w/o an associated P.
86 // Design doc at https://golang.org/s/go11sched.
88 // Worker thread parking/unparking.
89 // We need to balance between keeping enough running worker threads to utilize
90 // available hardware parallelism and parking excessive running worker threads
91 // to conserve CPU resources and power. This is not simple for two reasons:
92 // (1) scheduler state is intentionally distributed (in particular, per-P work
93 // queues), so it is not possible to compute global predicates on fast paths;
94 // (2) for optimal thread management we would need to know the future (don't park
95 // a worker thread when a new goroutine will be readied in near future).
97 // Three rejected approaches that would work badly:
98 // 1. Centralize all scheduler state (would inhibit scalability).
99 // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
100 // is a spare P, unpark a thread and hand it the P and the goroutine.
101 // This would lead to thread state thrashing, as the thread that readied the
102 // goroutine can be out of work the very next moment, and we would then need to park it.
103 // Also, it would destroy locality of computation, as we want to keep
104 // dependent goroutines on the same thread, and it would introduce additional latency.
105 // 3. Unpark an additional thread whenever we ready a goroutine and there is an
106 // idle P, but don't do handoff. This would lead to excessive thread parking/
107 // unparking as the additional threads will instantly park without discovering
108 // any work to do.
110 // The current approach:
111 // We unpark an additional thread when we ready a goroutine if there is an
112 // idle P and there are no "spinning" worker threads. A worker thread is considered
113 // spinning if it is out of local work and did not find work in global run queue/
114 // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
115 // Threads unparked this way are also considered spinning; we don't do goroutine
116 // handoff so such threads are out of work initially. Spinning threads do some
117 // spinning looking for work in per-P run queues before parking. If a spinning
118 // thread finds work it takes itself out of the spinning state and proceeds to
119 // execution. If it does not find work it takes itself out of the spinning state
120 // and then parks.
121 // If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
122 // new threads when readying goroutines. To compensate for that, if the last spinning
123 // thread finds work and stops spinning, it must unpark a new spinning thread.
124 // This approach smooths out unjustified spikes of thread unparking,
125 // but at the same time guarantees eventual maximal CPU parallelism utilization.
127 // The main implementation complication is that we need to be very careful during
128 // spinning->non-spinning thread transition. This transition can race with submission
129 // of a new goroutine, and either one part or another needs to unpark another worker
130 // thread. If they both fail to do that, we can end up with semi-persistent CPU
131 // underutilization. The general pattern for goroutine readying is: submit a goroutine
132 // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
133 // The general pattern for spinning->non-spinning transition is: decrement nmspinning,
134 // #StoreLoad-style memory barrier, check all per-P work queues for new work.
135 // Note that all this complexity does not apply to global run queue as we are not
136 // sloppy about thread unparking when submitting to global queue. Also see comments
137 // for nmspinning manipulation.
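// An illustrative sketch of the two racing paths described above. This is
// simplified pseudocode, not the actual implementation (the real code lives
// in ready/runqput and in the scheduler's spinning handling):
//
//	// readying a goroutine:
//	runqput(pp, gp, next)                  // submit to the local work queue
//	// #StoreLoad-style memory barrier
//	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
//		wakep()                        // no spinner will find this work; unpark one
//	}
//
//	// spinning -> non-spinning transition:
//	atomic.Xadd(&sched.nmspinning, -1)
//	// #StoreLoad-style memory barrier
//	// re-check all per-P run queues; if work is found, unpark another spinner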
139 var (
140 m0 m
141 g0 g
142 raceprocctx0 uintptr
145 // main_init_done is a signal used by cgocallbackg that initialization
146 // has been completed. It is made before _cgo_notify_runtime_init_done,
147 // so all cgo calls can rely on it existing. When main_init is complete,
148 // it is closed, meaning cgocallbackg can reliably receive from it.
149 var main_init_done chan bool
151 // mainStarted indicates that the main M has started.
152 var mainStarted bool
154 // runtimeInitTime is the nanotime() at which the runtime started.
155 var runtimeInitTime int64
157 // Value to use for signal mask for newly created M's.
158 var initSigmask sigset
160 // The main goroutine.
161 func main(unsafe.Pointer) {
162 g := getg()
164 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
165 // Using decimal instead of binary GB and MB because
166 // they look nicer in the stack overflow failure message.
167 if sys.PtrSize == 8 {
168 maxstacksize = 1000000000
169 } else {
170 maxstacksize = 250000000
173 // Allow newproc to start new Ms.
174 mainStarted = true
176 if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
177 systemstack(func() {
178 newm(sysmon, nil)
182 // Lock the main goroutine onto this, the main OS thread,
183 // during initialization. Most programs won't care, but a few
184 // do require certain calls to be made by the main thread.
185 // Those can arrange for main.main to run in the main thread
186 // by calling runtime.LockOSThread during initialization
187 // to preserve the lock.
188 lockOSThread()
190 if g.m != &m0 {
191 throw("runtime.main not on m0")
194 if nanotime() == 0 {
195 throw("nanotime returning zero")
198 // Defer unlock so that runtime.Goexit during init does the unlock too.
199 needUnlock := true
200 defer func() {
201 if needUnlock {
202 unlockOSThread()
206 // Record when the world started.
207 runtimeInitTime = nanotime()
209 main_init_done = make(chan bool)
210 if iscgo {
211 // Start the template thread in case we enter Go from
212 // a C-created thread and need to create a new thread.
213 startTemplateThread()
214 _cgo_notify_runtime_init_done()
217 fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
218 fn()
219 createGcRootsIndex()
220 close(main_init_done)
222 // For gccgo we have to wait until after main is initialized
223 // to enable GC, because initializing main registers the GC roots.
224 gcenable()
226 needUnlock = false
227 unlockOSThread()
229 if isarchive || islibrary {
230 // A program compiled with -buildmode=c-archive or c-shared
231 // has a main, but it is not executed.
232 return
234 fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
235 fn()
236 if raceenabled {
237 racefini()
240 // Make racy client program work: if panicking on
241 // another goroutine at the same time as main returns,
242 // let the other goroutine finish printing the panic trace.
243 // Once it does, it will exit. See issues 3934 and 20018.
244 if atomic.Load(&runningPanicDefers) != 0 {
245 // Running deferred functions should not take long.
246 for c := 0; c < 1000; c++ {
247 if atomic.Load(&runningPanicDefers) == 0 {
248 break
250 Gosched()
253 if atomic.Load(&panicking) != 0 {
254 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
257 exit(0)
258 for {
259 var x *int32
260 *x = 0
264 // os_beforeExit is called from os.Exit(0).
265 //go:linkname os_beforeExit os.runtime_beforeExit
266 func os_beforeExit() {
267 if raceenabled {
268 racefini()
272 // start forcegc helper goroutine
273 func init() {
274 expectSystemGoroutine()
275 go forcegchelper()
278 func forcegchelper() {
279 setSystemGoroutine()
281 forcegc.g = getg()
282 for {
283 lock(&forcegc.lock)
284 if forcegc.idle != 0 {
285 throw("forcegc: phase error")
287 atomic.Store(&forcegc.idle, 1)
288 goparkunlock(&forcegc.lock, waitReasonForceGGIdle, traceEvGoBlock, 1)
289 // this goroutine is explicitly resumed by sysmon
290 if debug.gctrace > 0 {
291 println("GC forced")
293 // Time-triggered, fully concurrent.
294 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
298 //go:nosplit
300 // Gosched yields the processor, allowing other goroutines to run. It does not
301 // suspend the current goroutine, so execution resumes automatically.
302 func Gosched() {
303 checkTimeouts()
304 mcall(gosched_m)
307 // goschedguarded yields the processor like gosched, but also checks
308 // for forbidden states and opts out of the yield in those cases.
309 //go:nosplit
310 func goschedguarded() {
311 mcall(goschedguarded_m)
314 // Puts the current goroutine into a waiting state and calls unlockf.
315 // If unlockf returns false, the goroutine is resumed.
316 // unlockf must not access this G's stack, as it may be moved between
317 // the call to gopark and the call to unlockf.
318 // Reason explains why the goroutine has been parked.
319 // It is displayed in stack traces and heap dumps.
320 // Reasons should be unique and descriptive.
321 // Do not re-use reasons; add new ones.
322 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
323 if reason != waitReasonSleep {
324 checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
326 mp := acquirem()
327 gp := mp.curg
328 status := readgstatus(gp)
329 if status != _Grunning && status != _Gscanrunning {
330 throw("gopark: bad g status")
332 mp.waitlock = lock
333 mp.waitunlockf = unlockf
334 gp.waitreason = reason
335 mp.waittraceev = traceEv
336 mp.waittraceskip = traceskip
337 releasem(mp)
338 // can't do anything that might move the G between Ms here.
339 mcall(park_m)
342 // Puts the current goroutine into a waiting state and unlocks the lock.
343 // The goroutine can be made runnable again by calling goready(gp).
344 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
345 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
348 func goready(gp *g, traceskip int) {
349 systemstack(func() {
350 ready(gp, traceskip, true)
354 //go:nosplit
355 func acquireSudog() *sudog {
356 // Delicate dance: the semaphore implementation calls
357 // acquireSudog, acquireSudog calls new(sudog),
358 // new calls malloc, malloc can call the garbage collector,
359 // and the garbage collector calls the semaphore implementation
360 // in stopTheWorld.
361 // Break the cycle by doing acquirem/releasem around new(sudog).
362 // The acquirem/releasem increments m.locks during new(sudog),
363 // which keeps the garbage collector from being invoked.
364 mp := acquirem()
365 pp := mp.p.ptr()
366 if len(pp.sudogcache) == 0 {
367 lock(&sched.sudoglock)
368 // First, try to grab a batch from central cache.
369 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
370 s := sched.sudogcache
371 sched.sudogcache = s.next
372 s.next = nil
373 pp.sudogcache = append(pp.sudogcache, s)
375 unlock(&sched.sudoglock)
376 // If the central cache is empty, allocate a new one.
377 if len(pp.sudogcache) == 0 {
378 pp.sudogcache = append(pp.sudogcache, new(sudog))
381 n := len(pp.sudogcache)
382 s := pp.sudogcache[n-1]
383 pp.sudogcache[n-1] = nil
384 pp.sudogcache = pp.sudogcache[:n-1]
385 if s.elem != nil {
386 throw("acquireSudog: found s.elem != nil in cache")
388 releasem(mp)
389 return s
392 //go:nosplit
393 func releaseSudog(s *sudog) {
394 if s.elem != nil {
395 throw("runtime: sudog with non-nil elem")
397 if s.isSelect {
398 throw("runtime: sudog with non-false isSelect")
400 if s.next != nil {
401 throw("runtime: sudog with non-nil next")
403 if s.prev != nil {
404 throw("runtime: sudog with non-nil prev")
406 if s.waitlink != nil {
407 throw("runtime: sudog with non-nil waitlink")
409 if s.c != nil {
410 throw("runtime: sudog with non-nil c")
412 gp := getg()
413 if gp.param != nil {
414 throw("runtime: releaseSudog with non-nil gp.param")
416 mp := acquirem() // avoid rescheduling to another P
417 pp := mp.p.ptr()
418 if len(pp.sudogcache) == cap(pp.sudogcache) {
419 // Transfer half of local cache to the central cache.
420 var first, last *sudog
421 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
422 n := len(pp.sudogcache)
423 p := pp.sudogcache[n-1]
424 pp.sudogcache[n-1] = nil
425 pp.sudogcache = pp.sudogcache[:n-1]
426 if first == nil {
427 first = p
428 } else {
429 last.next = p
431 last = p
433 lock(&sched.sudoglock)
434 last.next = sched.sudogcache
435 sched.sudogcache = first
436 unlock(&sched.sudoglock)
438 pp.sudogcache = append(pp.sudogcache, s)
439 releasem(mp)
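// An illustrative sketch of how the sudog cache above is typically used by
// blocking primitives (simplified; the real callers are the channel and
// semaphore implementations):
//
//	s := acquireSudog()    // take a sudog from the per-P or central cache
//	s.g = getg()
//	// ... enqueue s on a wait queue and park ...
//	s.g = nil
//	releaseSudog(s)        // elem, next, prev, waitlink and c must be nil here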
442 // funcPC returns the entry PC of the function f.
443 // It assumes that f is a func value. Otherwise the behavior is undefined.
444 // CAREFUL: In programs with plugins, funcPC can return different values
445 // for the same function (because there are actually multiple copies of
446 // the same function in the address space). To be safe, don't use the
447 // results of this function in any == expression. It is only safe to
448 // use the result as an address at which to start executing code.
450 // For gccgo note that this differs from the gc implementation; the gc
451 // implementation adds sys.PtrSize to the address of the interface
452 // value, but GCC's alias analysis decides that that can not be a
453 // reference to the second field of the interface, and in some cases
454 // it drops the initialization of the second field as a dead store.
455 //go:nosplit
456 func funcPC(f interface{}) uintptr {
457 i := (*iface)(unsafe.Pointer(&f))
458 r := *(*uintptr)(i.data)
459 if cpu.FunctionDescriptors {
460 // With PPC64 ELF ABI v1 function descriptors the
461 // function address is a pointer to a struct whose
462 // first field is the actual PC.
463 r = *(*uintptr)(unsafe.Pointer(r))
465 return r
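// An illustrative (hypothetical) use of funcPC, per the caveats above: the
// result is only suitable as a code address to start executing at, never as
// an identity to compare with ==.
//
//	pc := funcPC(forcegchelper)          // entry PC of forcegchelper
//	// ok:  hand pc to low-level code that needs a raw entry address
//	// bad: pc == funcPC(forcegchelper) may be false in programs with plugins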
468 func lockedOSThread() bool {
469 gp := getg()
470 return gp.lockedm != 0 && gp.m.lockedg != 0
473 var (
474 allgs []*g
475 allglock mutex
478 func allgadd(gp *g) {
479 if readgstatus(gp) == _Gidle {
480 throw("allgadd: bad status Gidle")
483 lock(&allglock)
484 allgs = append(allgs, gp)
485 allglen = uintptr(len(allgs))
486 unlock(&allglock)
489 const (
490 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
491 // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
492 _GoidCacheBatch = 16
495 // cpuinit extracts the environment variable GODEBUG from the environment on
496 // Unix-like operating systems and calls internal/cpu.Initialize.
497 func cpuinit() {
498 const prefix = "GODEBUG="
499 var env string
501 switch GOOS {
502 case "aix", "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
503 cpu.DebugOptions = true
505 // Similar to goenv_unix but extracts the environment value for
506 // GODEBUG directly.
507 // TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
508 n := int32(0)
509 for argv_index(argv, argc+1+n) != nil {
513 for i := int32(0); i < n; i++ {
514 p := argv_index(argv, argc+1+i)
515 s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
517 if hasPrefix(s, prefix) {
518 env = gostring(p)[len(prefix):]
519 break
524 cpu.Initialize(env)
527 func ginit() {
528 _m_ := &m0
529 _g_ := &g0
530 _m_.g0 = _g_
531 _m_.curg = _g_
532 _g_.m = _m_
533 setg(_g_)
536 // The bootstrap sequence is:
538 // call osinit
539 // call schedinit
540 // make & queue new G
541 // call runtime·mstart
543 // The new G calls runtime·main.
544 func schedinit() {
545 _g_ := getg()
546 sched.maxmcount = 10000
548 usestackmaps = probestackmaps()
550 mallocinit()
551 mcommoninit(_g_.m)
552 cpuinit() // must run before alginit
553 alginit() // maps must not be used before this call
555 msigsave(_g_.m)
556 initSigmask = _g_.m.sigmask
558 goargs()
559 goenvs()
560 parsedebugvars()
561 gcinit()
563 sched.lastpoll = uint64(nanotime())
564 procs := ncpu
565 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
566 procs = n
568 if procresize(procs) != nil {
569 throw("unknown runnable goroutine during bootstrap")
572 // For cgocheck > 1, we turn on the write barrier at all times
573 // and check all pointer writes. We can't do this until after
574 // procresize because the write barrier needs a P.
575 if debug.cgocheck > 1 {
576 writeBarrier.cgo = true
577 writeBarrier.enabled = true
578 for _, p := range allp {
579 p.wbBuf.reset()
583 if buildVersion == "" {
584 // Condition should never trigger. This code just serves
585 // to ensure runtime·buildVersion is kept in the resulting binary.
586 buildVersion = "unknown"
588 if len(modinfo) == 1 {
589 // Condition should never trigger. This code just serves
590 // to ensure runtime·modinfo is kept in the resulting binary.
591 modinfo = ""
595 func dumpgstatus(gp *g) {
596 _g_ := getg()
597 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
598 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
601 func checkmcount() {
602 // sched lock is held
603 if mcount() > sched.maxmcount {
604 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
605 throw("thread exhaustion")
609 func mcommoninit(mp *m) {
610 _g_ := getg()
612 // g0 stack won't make sense for the user (and is not necessarily unwindable).
613 if _g_ != _g_.m.g0 {
614 callers(1, mp.createstack[:])
617 lock(&sched.lock)
618 if sched.mnext+1 < sched.mnext {
619 throw("runtime: thread ID overflow")
621 mp.id = sched.mnext
622 sched.mnext++
623 checkmcount()
625 mp.fastrand[0] = 1597334677 * uint32(mp.id)
626 mp.fastrand[1] = uint32(cputicks())
627 if mp.fastrand[0]|mp.fastrand[1] == 0 {
628 mp.fastrand[1] = 1
631 mpreinit(mp)
633 // Add to allm so garbage collector doesn't free g->m
634 // when it is just in a register or thread-local storage.
635 mp.alllink = allm
637 // NumCgoCall() iterates over allm w/o schedlock,
638 // so we need to publish it safely.
639 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
640 unlock(&sched.lock)
643 // Mark gp ready to run.
644 func ready(gp *g, traceskip int, next bool) {
645 if trace.enabled {
646 traceGoUnpark(gp, traceskip)
649 status := readgstatus(gp)
651 // Mark runnable.
652 _g_ := getg()
653 mp := acquirem() // disable preemption because it can be holding p in a local var
654 if status&^_Gscan != _Gwaiting {
655 dumpgstatus(gp)
656 throw("bad g->status in ready")
659 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
660 casgstatus(gp, _Gwaiting, _Grunnable)
661 runqput(_g_.m.p.ptr(), gp, next)
662 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
663 wakep()
665 releasem(mp)
668 // freezeStopWait is a large value that freezetheworld sets
669 // sched.stopwait to in order to request that all Gs permanently stop.
670 const freezeStopWait = 0x7fffffff
672 // freezing is set to non-zero if the runtime is trying to freeze the
673 // world.
674 var freezing uint32
676 // Similar to stopTheWorld but best-effort and can be called several times.
677 // There is no reverse operation; it is used during crashing.
678 // This function must not lock any mutexes.
679 func freezetheworld() {
680 atomic.Store(&freezing, 1)
681 // stopwait and preemption requests can be lost
682 // due to races with concurrently executing threads,
683 // so try several times
684 for i := 0; i < 5; i++ {
685 // this should tell the scheduler to not start any new goroutines
686 sched.stopwait = freezeStopWait
687 atomic.Store(&sched.gcwaiting, 1)
688 // this should stop running goroutines
689 if !preemptall() {
690 break // no running goroutines
692 usleep(1000)
694 // to be sure
695 usleep(1000)
696 preemptall()
697 usleep(1000)
700 // All reads and writes of g's status go through readgstatus, casgstatus,
701 // castogscanstatus, casfrom_Gscanstatus.
702 //go:nosplit
703 func readgstatus(gp *g) uint32 {
704 return atomic.Load(&gp.atomicstatus)
707 // Ownership of gcscanvalid:
709 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
710 // then gp owns gp.gcscanvalid, and other goroutines must not modify it.
712 // Otherwise, a second goroutine can lock the scan state by setting _Gscan
713 // in the status bit and then modify gcscanvalid, and then unlock the scan state.
715 // Note that the first condition implies an exception to the second:
716 // if a second goroutine changes gp's status to _Grunning|_Gscan,
717 // that second goroutine still does not have the right to modify gcscanvalid.
719 // The Gscanstatuses are acting like locks and this releases them.
720 // If it proves to be a performance hit we should be able to make these
721 // simple atomic stores but for now we are going to throw if
722 // we see an inconsistent state.
723 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
724 success := false
726 // Check that transition is valid.
727 switch oldval {
728 default:
729 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
730 dumpgstatus(gp)
731 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
732 case _Gscanrunnable,
733 _Gscanwaiting,
734 _Gscanrunning,
735 _Gscansyscall:
736 if newval == oldval&^_Gscan {
737 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
740 if !success {
741 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
742 dumpgstatus(gp)
743 throw("casfrom_Gscanstatus: gp->status is not in scan state")
747 // This will return false if the gp is not in the expected status and the cas fails.
748 // This acts like a lock acquire while the casfromgstatus acts like a lock release.
749 func castogscanstatus(gp *g, oldval, newval uint32) bool {
750 switch oldval {
751 case _Grunnable,
752 _Grunning,
753 _Gwaiting,
754 _Gsyscall:
755 if newval == oldval|_Gscan {
756 return atomic.Cas(&gp.atomicstatus, oldval, newval)
759 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
760 throw("castogscanstatus")
761 panic("not reached")
764 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
765 // and casfrom_Gscanstatus instead.
766 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
767 // put it in the Gscan state is finished.
768 //go:nosplit
769 func casgstatus(gp *g, oldval, newval uint32) {
770 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
771 systemstack(func() {
772 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
773 throw("casgstatus: bad incoming values")
777 if oldval == _Grunning && gp.gcscanvalid {
778 // If oldvall == _Grunning, then the actual status must be
779 // _Grunning or _Grunning|_Gscan; either way,
780 // we own gp.gcscanvalid, so it's safe to read.
781 // gp.gcscanvalid must not be true when we are running.
782 systemstack(func() {
783 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
784 throw("casgstatus")
788 // See https://golang.org/cl/21503 for justification of the yield delay.
789 const yieldDelay = 5 * 1000
790 var nextYield int64
792 // loop if gp->atomicstatus is in a scan state giving
793 // GC time to finish and change the state to oldval.
794 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
795 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
796 throw("casgstatus: waiting for Gwaiting but is Grunnable")
798 // Help GC if needed.
799 // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
800 // gp.preemptscan = false
801 // systemstack(func() {
802 // gcphasework(gp)
803 // })
804 // }
805 // But meanwhile just yield.
806 if i == 0 {
807 nextYield = nanotime() + yieldDelay
809 if nanotime() < nextYield {
810 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
811 procyield(1)
813 } else {
814 osyield()
815 nextYield = nanotime() + yieldDelay/2
818 if newval == _Grunning {
819 gp.gcscanvalid = false
823 // scang blocks until gp's stack has been scanned.
824 // It might be scanned by scang or it might be scanned by the goroutine itself.
825 // Either way, the stack scan has completed when scang returns.
826 func scang(gp *g, gcw *gcWork) {
827 // Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
828 // Nothing is racing with us now, but gcscandone might be set to true left over
829 // from an earlier round of stack scanning (we scan twice per GC).
830 // We use gcscandone to record whether the scan has been done during this round.
832 gp.gcscandone = false
834 // See https://golang.org/cl/21503 for justification of the yield delay.
835 const yieldDelay = 10 * 1000
836 var nextYield int64
838 // Endeavor to get gcscandone set to true,
839 // either by doing the stack scan ourselves or by coercing gp to scan itself.
840 // gp.gcscandone can transition from false to true when we're not looking
841 // (if we asked for preemption), so any time we lock the status using
842 // castogscanstatus we have to double-check that the scan is still not done.
843 loop:
844 for i := 0; !gp.gcscandone; i++ {
845 switch s := readgstatus(gp); s {
846 default:
847 dumpgstatus(gp)
848 throw("stopg: invalid status")
850 case _Gdead:
851 // No stack.
852 gp.gcscandone = true
853 break loop
855 case _Gcopystack:
856 // Stack being switched. Go around again.
858 case _Gsyscall:
859 if usestackmaps {
860 // Claim goroutine by setting scan bit.
861 // Racing with execution or readying of gp.
862 // The scan bit keeps them from running
863 // the goroutine until we're done.
864 if castogscanstatus(gp, s, s|_Gscan) {
865 if gp.scanningself {
866 // Don't try to scan the stack
867 // if the goroutine is going to do
868 // it itself.
869 // FIXME: can this happen?
870 restartg(gp)
871 break
873 if !gp.gcscandone {
874 // Send a signal to let the goroutine scan
875 // itself. This races with enter/exitsyscall.
876 // If the goroutine is not stopped at a safepoint,
877 // it will not scan the stack and we'll try again.
878 mp := gp.m
879 noteclear(&mp.scannote)
880 gp.scangcw = uintptr(unsafe.Pointer(gcw))
881 tgkill(getpid(), _pid_t(mp.procid), _SIGURG)
883 // Wait for gp to scan its own stack.
884 notesleep(&mp.scannote)
886 if !gp.gcscandone {
887 // The signal was delivered at a bad time.
888 // Try again.
889 restartg(gp)
890 break
893 restartg(gp)
894 break loop
896 break
898 fallthrough
900 case _Grunnable, _Gwaiting:
901 // Claim goroutine by setting scan bit.
902 // Racing with execution or readying of gp.
903 // The scan bit keeps them from running
904 // the goroutine until we're done.
905 if castogscanstatus(gp, s, s|_Gscan) {
906 if gp.scanningself {
907 // Don't try to scan the stack
908 // if the goroutine is going to do
909 // it itself.
910 restartg(gp)
911 break
913 if !gp.gcscandone {
914 scanstack(gp, gcw)
915 gp.gcscandone = true
917 restartg(gp)
918 break loop
921 case _Gexitingsyscall:
922 // This is a transient state during which we should not scan its stack.
923 // Try again.
925 case _Gscanwaiting:
926 // newstack is doing a scan for us right now. Wait.
928 case _Gscanrunning:
929 // checkPreempt is scanning. Wait.
931 case _Grunning:
932 // Goroutine running. Try to preempt execution so it can scan itself.
933 // The preemption handler (in newstack) does the actual scan.
935 // Optimization: if there is already a pending preemption request
936 // (from the previous loop iteration), don't bother with the atomics.
937 if gp.preemptscan && gp.preempt {
938 break
941 // Ask for preemption and self scan.
942 if castogscanstatus(gp, _Grunning, _Gscanrunning) {
943 if !gp.gcscandone {
944 gp.preemptscan = true
945 gp.preempt = true
947 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
951 if i == 0 {
952 nextYield = nanotime() + yieldDelay
954 if nanotime() < nextYield {
955 procyield(10)
956 } else {
957 osyield()
958 nextYield = nanotime() + yieldDelay/2
962 gp.preemptscan = false // cancel scan request if no longer needed
965 // The GC requests that this routine be moved from a scanmumble state to a mumble state.
966 func restartg(gp *g) {
967 if gp.scang != 0 || gp.scangcw != 0 {
968 print("g ", gp.goid, "is being scanned scang=", gp.scang, " scangcw=", gp.scangcw, "\n")
969 throw("restartg: being scanned")
972 s := readgstatus(gp)
973 switch s {
974 default:
975 dumpgstatus(gp)
976 throw("restartg: unexpected status")
978 case _Gdead:
979 // ok
981 case _Gscanrunnable,
982 _Gscanwaiting,
983 _Gscansyscall:
984 casfrom_Gscanstatus(gp, s, s&^_Gscan)
988 // stopTheWorld stops all P's from executing goroutines, interrupting
989 // all goroutines at GC safe points and records reason as the reason
990 // for the stop. On return, only the current goroutine's P is running.
991 // stopTheWorld must not be called from a system stack and the caller
992 // must not hold worldsema. The caller must call startTheWorld when
993 // other P's should resume execution.
995 // stopTheWorld is safe for multiple goroutines to call at the
996 // same time. Each will execute its own stop, and the stops will
997 // be serialized.
999 // This is also used by routines that do stack dumps. If the system is
1000 // in panic or being exited, this may not reliably stop all
1001 // goroutines.
1002 func stopTheWorld(reason string) {
1003 semacquire(&worldsema)
1004 getg().m.preemptoff = reason
1005 systemstack(stopTheWorldWithSema)
1008 // startTheWorld undoes the effects of stopTheWorld.
1009 func startTheWorld() {
1010 systemstack(func() { startTheWorldWithSema(false) })
1011 // worldsema must be held over startTheWorldWithSema to ensure
1012 // gomaxprocs cannot change while worldsema is held.
1013 semrelease(&worldsema)
1014 getg().m.preemptoff = ""
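// An illustrative sketch of the intended bracketing of the two functions
// above (simplified; the GC and tools that dump stacks are the real callers).
// Per the stopTheWorld documentation, the caller must not be on the system
// stack and must not already hold worldsema:
//
//	stopTheWorld("example reason") // all other P's are now stopped
//	// ... inspect or mutate global state safely ...
//	startTheWorld()                // resume execution on the other P's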
1017 // Holding worldsema grants an M the right to try to stop the world
1018 // and prevents gomaxprocs from changing concurrently.
1019 var worldsema uint32 = 1
1021 // stopTheWorldWithSema is the core implementation of stopTheWorld.
1022 // The caller is responsible for acquiring worldsema and disabling
1023 // preemption first and then should call stopTheWorldWithSema on the system
1024 // stack:
1026 // semacquire(&worldsema, 0)
1027 // m.preemptoff = "reason"
1028 // systemstack(stopTheWorldWithSema)
1030 // When finished, the caller must either call startTheWorld or undo
1031 // these three operations separately:
1033 // m.preemptoff = ""
1034 // systemstack(startTheWorldWithSema)
1035 // semrelease(&worldsema)
1037 // It is allowed to acquire worldsema once and then execute multiple
1038 // startTheWorldWithSema/stopTheWorldWithSema pairs.
1039 // Other P's are able to execute between successive calls to
1040 // startTheWorldWithSema and stopTheWorldWithSema.
1041 // Holding worldsema causes any other goroutines invoking
1042 // stopTheWorld to block.
1043 func stopTheWorldWithSema() {
1044 _g_ := getg()
1046 // If we hold a lock, then we won't be able to stop another M
1047 // that is blocked trying to acquire the lock.
1048 if _g_.m.locks > 0 {
1049 throw("stopTheWorld: holding locks")
1052 lock(&sched.lock)
1053 sched.stopwait = gomaxprocs
1054 atomic.Store(&sched.gcwaiting, 1)
1055 preemptall()
1056 // stop current P
1057 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
1058 sched.stopwait--
1059 // try to retake all P's in Psyscall status
1060 for _, p := range allp {
1061 s := p.status
1062 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
1063 if trace.enabled {
1064 traceGoSysBlock(p)
1065 traceProcStop(p)
1067 p.syscalltick++
1068 sched.stopwait--
1071 // stop idle P's
1072 for {
1073 p := pidleget()
1074 if p == nil {
1075 break
1077 p.status = _Pgcstop
1078 sched.stopwait--
1080 wait := sched.stopwait > 0
1081 unlock(&sched.lock)
1083 // wait for remaining P's to stop voluntarily
1084 if wait {
1085 for {
1086 // wait for 100us, then try to re-preempt in case of any races
1087 if notetsleep(&sched.stopnote, 100*1000) {
1088 noteclear(&sched.stopnote)
1089 break
1091 preemptall()
1095 // sanity checks
1096 bad := ""
1097 if sched.stopwait != 0 {
1098 bad = "stopTheWorld: not stopped (stopwait != 0)"
1099 } else {
1100 for _, p := range allp {
1101 if p.status != _Pgcstop {
1102 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1106 if atomic.Load(&freezing) != 0 {
1107 // Some other thread is panicking. This can cause the
1108 // sanity checks above to fail if the panic happens in
1109 // the signal handler on a stopped thread. Either way,
1110 // we should halt this thread.
1111 lock(&deadlock)
1112 lock(&deadlock)
1114 if bad != "" {
1115 throw(bad)
1119 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1120 mp := acquirem() // disable preemption because it can be holding p in a local var
1121 if netpollinited() {
1122 list := netpoll(false) // non-blocking
1123 injectglist(&list)
1125 lock(&sched.lock)
1127 procs := gomaxprocs
1128 if newprocs != 0 {
1129 procs = newprocs
1130 newprocs = 0
1132 p1 := procresize(procs)
1133 sched.gcwaiting = 0
1134 if sched.sysmonwait != 0 {
1135 sched.sysmonwait = 0
1136 notewakeup(&sched.sysmonnote)
1138 unlock(&sched.lock)
1140 for p1 != nil {
1141 p := p1
1142 p1 = p1.link.ptr()
1143 if p.m != 0 {
1144 mp := p.m.ptr()
1145 p.m = 0
1146 if mp.nextp != 0 {
1147 throw("startTheWorld: inconsistent mp->nextp")
1149 mp.nextp.set(p)
1150 notewakeup(&mp.park)
1151 } else {
1152 // Start M to run P. Do not start another M below.
1153 newm(nil, p)
1157 // Capture start-the-world time before doing clean-up tasks.
1158 startTime := nanotime()
1159 if emitTraceEvent {
1160 traceGCSTWDone()
1163 // Wakeup an additional proc in case we have excessive runnable goroutines
1164 // in local queues or in the global queue. If we don't, the proc will park itself.
1165 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
1166 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
1167 wakep()
1170 releasem(mp)
1172 return startTime
1175 // First function run by a new goroutine.
1176 // This is passed to makecontext.
1177 func kickoff() {
1178 gp := getg()
1180 if gp.traceback != 0 {
1181 gtraceback(gp)
1184 fv := gp.entry
1185 param := gp.param
1187 // When running on the g0 stack we can wind up here without a p,
1188 // for example from mcall(exitsyscall0) in exitsyscall, in
1189 // which case we can not run a write barrier.
1190 // It is also possible for us to get here from the systemstack
1191 // call in wbBufFlush, at which point the write barrier buffer
1192 // is full and we can not run a write barrier.
1193 // Setting gp.entry = nil or gp.param = nil will try to run a
1194 // write barrier, so if we are on the g0 stack due to mcall
1195 // (systemstack calls mcall) then clear the field using uintptr.
1196 // This is OK when gp.param is gp.m.curg, as curg will be kept
1197 // alive elsewhere, and gp.entry always points into g, or
1198 // to a statically allocated value, or (in the case of mcall)
1199 // to the stack.
1200 if gp == gp.m.g0 && gp.param == unsafe.Pointer(gp.m.curg) {
1201 *(*uintptr)(unsafe.Pointer(&gp.entry)) = 0
1202 *(*uintptr)(unsafe.Pointer(&gp.param)) = 0
1203 } else if gp.m.p == 0 {
1204 throw("no p in kickoff")
1205 } else {
1206 gp.entry = nil
1207 gp.param = nil
1210 // Record the entry SP to help stack scan.
1211 gp.entrysp = getsp()
1213 fv(param)
1214 goexit1()
1217 func mstart1() {
1218 _g_ := getg()
1220 if _g_ != _g_.m.g0 {
1221 throw("bad runtime·mstart")
1224 asminit()
1226 // Install signal handlers; after minit so that minit can
1227 // prepare the thread to be able to handle the signals.
1228 // For gccgo minit was called by C code.
1229 if _g_.m == &m0 {
1230 mstartm0()
1233 if fn := _g_.m.mstartfn; fn != nil {
1234 fn()
1237 if _g_.m != &m0 {
1238 acquirep(_g_.m.nextp.ptr())
1239 _g_.m.nextp = 0
1241 schedule()
1244 // mstartm0 implements part of mstart1 that only runs on the m0.
1246 // Write barriers are allowed here because we know the GC can't be
1247 // running yet, so they'll be no-ops.
1249 //go:yeswritebarrierrec
1250 func mstartm0() {
1251 // Create an extra M for callbacks on threads not created by Go.
1252 // An extra M is also needed on Windows for callbacks created by
1253 // syscall.NewCallback. See issue #6751 for details.
1254 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1255 cgoHasExtraM = true
1256 newextram()
1258 initsig(false)
1261 // mexit tears down and exits the current thread.
1263 // Don't call this directly to exit the thread, since it must run at
1264 // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
1265 // unwind the stack to the point that exits the thread.
1267 // It is entered with m.p != nil, so write barriers are allowed. It
1268 // will release the P before exiting.
1270 //go:yeswritebarrierrec
1271 func mexit(osStack bool) {
1272 g := getg()
1273 m := g.m
1275 if m == &m0 {
1276 // This is the main thread. Just wedge it.
1278 // On Linux, exiting the main thread puts the process
1279 // into a non-waitable zombie state. On Plan 9,
1280 // exiting the main thread unblocks wait even though
1281 // other threads are still running. On Solaris we can
1282 // neither exitThread nor return from mstart. Other
1283 // bad things probably happen on other platforms.
1285 // We could try to clean up this M more before wedging
1286 // it, but that complicates signal handling.
1287 handoffp(releasep())
1288 lock(&sched.lock)
1289 sched.nmfreed++
1290 checkdead()
1291 unlock(&sched.lock)
1292 notesleep(&m.park)
1293 throw("locked m0 woke up")
1296 sigblock()
1297 unminit()
1299 // Free the gsignal stack.
1300 if m.gsignal != nil {
1301 stackfree(m.gsignal)
1304 // Remove m from allm.
1305 lock(&sched.lock)
1306 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1307 if *pprev == m {
1308 *pprev = m.alllink
1309 goto found
1312 throw("m not found in allm")
1313 found:
1314 if !osStack {
1315 // Delay reaping m until it's done with the stack.
1317 // If this is using an OS stack, the OS will free it
1318 // so there's no need for reaping.
1319 atomic.Store(&m.freeWait, 1)
1320 // Put m on the free list, though it will not be reaped until
1321 // freeWait is 0. Note that the free list must not be linked
1322 // through alllink because some functions walk allm without
1323 // locking, so may be using alllink.
1324 m.freelink = sched.freem
1325 sched.freem = m
1327 unlock(&sched.lock)
1329 // Release the P.
1330 handoffp(releasep())
1331 // After this point we must not have write barriers.
1333 // Invoke the deadlock detector. This must happen after
1334 // handoffp because it may have started a new M to take our
1335 // P's work.
1336 lock(&sched.lock)
1337 sched.nmfreed++
1338 checkdead()
1339 unlock(&sched.lock)
1341 if osStack {
1342 // Return from mstart and let the system thread
1343 // library free the g0 stack and terminate the thread.
1344 return
1347 // mstart is the thread's entry point, so there's nothing to
1348 // return to. Exit the thread directly. exitThread will clear
1349 // m.freeWait when it's done with the stack and the m can be
1350 // reaped.
1351 exitThread(&m.freeWait)
1354 // forEachP calls fn(p) for every P p when p reaches a GC safe point.
1355 // If a P is currently executing code, this will bring the P to a GC
1356 // safe point and execute fn on that P. If the P is not executing code
1357 // (it is idle or in a syscall), this will call fn(p) directly while
1358 // preventing the P from exiting its state. This does not ensure that
1359 // fn will run on every CPU executing Go code, but it acts as a global
1360 // memory barrier. GC uses this as a "ragged barrier."
1362 // The caller must hold worldsema.
1364 //go:systemstack
1365 func forEachP(fn func(*p)) {
1366 mp := acquirem()
1367 _p_ := getg().m.p.ptr()
1369 lock(&sched.lock)
1370 if sched.safePointWait != 0 {
1371 throw("forEachP: sched.safePointWait != 0")
1373 sched.safePointWait = gomaxprocs - 1
1374 sched.safePointFn = fn
1376 // Ask all Ps to run the safe point function.
1377 for _, p := range allp {
1378 if p != _p_ {
1379 atomic.Store(&p.runSafePointFn, 1)
1382 preemptall()
1384 // Any P entering _Pidle or _Psyscall from now on will observe
1385 // p.runSafePointFn == 1 and will call runSafePointFn when
1386 // changing its status to _Pidle/_Psyscall.
1388 // Run safe point function for all idle Ps. sched.pidle will
1389 // not change because we hold sched.lock.
1390 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1391 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1392 fn(p)
1393 sched.safePointWait--
1397 wait := sched.safePointWait > 0
1398 unlock(&sched.lock)
1400 // Run fn for the current P.
1401 fn(_p_)
1403 // Force Ps currently in _Psyscall into _Pidle and hand them
1404 // off to induce safe point function execution.
1405 for _, p := range allp {
1406 s := p.status
1407 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1408 if trace.enabled {
1409 traceGoSysBlock(p)
1410 traceProcStop(p)
1412 p.syscalltick++
1413 handoffp(p)
1417 // Wait for remaining Ps to run fn.
1418 if wait {
1419 for {
1420 // Wait for 100us, then try to re-preempt in
1421 // case of any races.
1423 // Requires system stack.
1424 if notetsleep(&sched.safePointNote, 100*1000) {
1425 noteclear(&sched.safePointNote)
1426 break
1428 preemptall()
1431 if sched.safePointWait != 0 {
1432 throw("forEachP: not done")
1434 for _, p := range allp {
1435 if p.runSafePointFn != 0 {
1436 throw("forEachP: P did not run fn")
1440 lock(&sched.lock)
1441 sched.safePointFn = nil
1442 unlock(&sched.lock)
1443 releasem(mp)
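// An illustrative sketch of a forEachP call (the callback body shown is
// hypothetical; the GC uses this mechanism as the "ragged barrier" described
// above). The caller must hold worldsema, and forEachP must run on the
// system stack:
//
//	semacquire(&worldsema)
//	systemstack(func() {
//		forEachP(func(pp *p) {
//			// runs with pp at a GC safe point,
//			// e.g. to flush a per-P cache or buffer
//		})
//	})
//	semrelease(&worldsema)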
1446 // runSafePointFn runs the safe point function, if any, for this P.
1447 // This should be called like
1449 // if getg().m.p.runSafePointFn != 0 {
1450 // runSafePointFn()
1451 // }
1453 // runSafePointFn must be checked on any transition in to _Pidle or
1454 // _Psyscall to avoid a race where forEachP sees that the P is running
1455 // just before the P goes into _Pidle/_Psyscall and neither forEachP
1456 // nor the P run the safe-point function.
1457 func runSafePointFn() {
1458 p := getg().m.p.ptr()
1459 // Resolve the race between forEachP running the safe-point
1460 // function on this P's behalf and this P running the
1461 // safe-point function directly.
1462 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1463 return
1465 sched.safePointFn(p)
1466 lock(&sched.lock)
1467 sched.safePointWait--
1468 if sched.safePointWait == 0 {
1469 notewakeup(&sched.safePointNote)
1471 unlock(&sched.lock)
1474 // Allocate a new m unassociated with any thread.
1475 // Can use p for allocation context if needed.
1476 // fn is recorded as the new m's m.mstartfn.
1478 // This function is allowed to have write barriers even if the caller
1479 // isn't because it borrows _p_.
1481 //go:yeswritebarrierrec
1482 func allocm(_p_ *p, fn func(), allocatestack bool) (mp *m, g0Stack unsafe.Pointer, g0StackSize uintptr) {
1483 _g_ := getg()
1484 acquirem() // disable GC because it can be called from sysmon
1485 if _g_.m.p == 0 {
1486 acquirep(_p_) // temporarily borrow p for mallocs in this function
1489 // Release the free M list. We need to do this somewhere and
1490 // this may free up a stack we can use.
1491 if sched.freem != nil {
1492 lock(&sched.lock)
1493 var newList *m
1494 for freem := sched.freem; freem != nil; {
1495 if freem.freeWait != 0 {
1496 next := freem.freelink
1497 freem.freelink = newList
1498 newList = freem
1499 freem = next
1500 continue
1502 stackfree(freem.g0)
1503 freem = freem.freelink
1505 sched.freem = newList
1506 unlock(&sched.lock)
1509 mp = new(m)
1510 mp.mstartfn = fn
1511 mcommoninit(mp)
1513 mp.g0 = malg(allocatestack, false, &g0Stack, &g0StackSize)
1514 mp.g0.m = mp
1516 if _p_ == _g_.m.p.ptr() {
1517 releasep()
1519 releasem(_g_.m)
1521 return mp, g0Stack, g0StackSize
1524 // needm is called when a cgo callback happens on a
1525 // thread without an m (a thread not created by Go).
1526 // In this case, needm is expected to find an m to use
1527 // and return with m, g initialized correctly.
1528 // Since m and g are not set now (likely nil, but see below)
1529 // needm is limited in what routines it can call. In particular
1530 // it can only call nosplit functions (textflag 7) and cannot
1531 // do any scheduling that requires an m.
1533 // In order to avoid needing heavy lifting here, we adopt
1534 // the following strategy: there is a stack of available m's
1535 // that can be stolen. Using compare-and-swap
1536 // to pop from the stack has ABA races, so we simulate
1537 // a lock by doing an exchange (via Casuintptr) to steal the stack
1538 // head and replace the top pointer with MLOCKED (1).
1539 // This serves as a simple spin lock that we can use even
1540 // without an m. The thread that locks the stack in this way
1541 // unlocks the stack by storing a valid stack head pointer.
1543 // In order to make sure that there is always an m structure
1544 // available to be stolen, we maintain the invariant that there
1545 // is always one more than needed. At the beginning of the
1546 // program (if cgo is in use) the list is seeded with a single m.
1547 // If needm finds that it has taken the last m off the list, its job
1548 // is - once it has installed its own m so that it can do things like
1549 // allocate memory - to create a spare m and put it on the list.
1551 // Each of these extra m's also has a g0 and a curg that are
1552 // pressed into service as the scheduling stack and current
1553 // goroutine for the duration of the cgo callback.
1555 // When the callback is done with the m, it calls dropm to
1556 // put the m back on the list.
1557 //go:nosplit
1558 func needm(x byte) {
1559 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1560 // Can happen if C/C++ code calls Go from a global ctor.
1561 // Can also happen on Windows if a global ctor uses a
1562 // callback created by syscall.NewCallback. See issue #6751
1563 // for details.
1565 // Can not throw, because scheduler is not initialized yet.
1566 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1567 exit(1)
1570 // Lock extra list, take head, unlock popped list.
1571 // nilokay=false is safe here because of the invariant above,
1572 // that the extra list always contains or will soon contain
1573 // at least one m.
1574 mp := lockextra(false)
1576 // Set needextram when we've just emptied the list,
1577 // so that the eventual call into cgocallbackg will
1578 // allocate a new m for the extra list. We delay the
1579 // allocation until then so that it can be done
1580 // after exitsyscall makes sure it is okay to be
1581 // running at all (that is, there's no garbage collection
1582 // running right now).
1583 mp.needextram = mp.schedlink == 0
1584 extraMCount--
1585 unlockextra(mp.schedlink.ptr())
1587 // Save and block signals before installing g.
1588 // Once g is installed, any incoming signals will try to execute,
1589 // but we won't have the sigaltstack settings and other data
1590 // set up appropriately until the end of minit, which will
1591 // unblock the signals. This is the same dance as when
1592 // starting a new m to run Go code via newosproc.
1593 msigsave(mp)
1594 sigblock()
1596 // Install g (= m->curg).
1597 setg(mp.curg)
1599 // Initialize this thread to use the m.
1600 asminit()
1601 minit()
1603 setGContext()
1605 // mp.curg is now a real goroutine.
1606 casgstatus(mp.curg, _Gdead, _Gsyscall)
1607 atomic.Xadd(&sched.ngsys, -1)
1610 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1612 // newextram allocates m's and puts them on the extra list.
1613 // It is called with a working local m, so that it can do things
1614 // like call schedlock and allocate.
1615 func newextram() {
1616 c := atomic.Xchg(&extraMWaiters, 0)
1617 if c > 0 {
1618 for i := uint32(0); i < c; i++ {
1619 oneNewExtraM()
1621 } else {
1622 // Make sure there is at least one extra M.
1623 mp := lockextra(true)
1624 unlockextra(mp)
1625 if mp == nil {
1626 oneNewExtraM()
1631 // oneNewExtraM allocates an m and puts it on the extra list.
1632 func oneNewExtraM() {
1633 // Create extra goroutine locked to extra m.
1634 // The goroutine is the context in which the cgo callback will run.
1635 // The sched.pc will never be returned to, but setting it to
1636 // goexit makes clear to the traceback routines where
1637 // the goroutine stack ends.
1638 mp, g0SP, g0SPSize := allocm(nil, nil, true)
1639 gp := malg(true, false, nil, nil)
1640 gp.gcscanvalid = true
1641 gp.gcscandone = true
1642 // malg returns status as _Gidle. Change to _Gdead before
1643 // adding to allg where GC can see it. We use _Gdead to hide
1644 // this from tracebacks and stack scans since it isn't a
1645 // "real" goroutine until needm grabs it.
1646 casgstatus(gp, _Gidle, _Gdead)
1647 gp.m = mp
1648 mp.curg = gp
1649 mp.lockedInt++
1650 mp.lockedg.set(gp)
1651 gp.lockedm.set(mp)
1652 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1653 // put on allg for garbage collector
1654 allgadd(gp)
1656 // The context for gp will be set up in needm.
1657 // Here we need to set the context for g0.
1658 makeGContext(mp.g0, g0SP, g0SPSize)
1660 // gp is now on the allg list, but we don't want it to be
1661 // counted by gcount. It would be more "proper" to increment
1662 // sched.ngfree, but that requires locking. Incrementing ngsys
1663 // has the same effect.
1664 atomic.Xadd(&sched.ngsys, +1)
1666 // Add m to the extra list.
1667 mnext := lockextra(true)
1668 mp.schedlink.set(mnext)
1669 extraMCount++
1670 unlockextra(mp)
1673 // dropm is called when a cgo callback has called needm but is now
1674 // done with the callback and returning back into the non-Go thread.
1675 // It puts the current m back onto the extra list.
1677 // The main expense here is the call to signalstack to release the
1678 // m's signal stack, and then the call to needm on the next callback
1679 // from this thread. It is tempting to try to save the m for next time,
1680 // which would eliminate both these costs, but there might not be
1681 // a next time: the current thread (which Go does not control) might exit.
1682 // If we saved the m for that thread, there would be an m leak each time
1683 // such a thread exited. Instead, we acquire and release an m on each
1684 // call. These should typically not be scheduling operations, just a few
1685 // atomics, so the cost should be small.
1687 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
1688 // variable using pthread_key_create. Unlike the pthread keys we already use
1689 // on OS X, this dummy key would never be read by Go code. It would exist
1690 // only so that we could register a thread-exit-time destructor.
1691 // That destructor would put the m back onto the extra list.
1692 // This is purely a performance optimization. The current version,
1693 // in which dropm happens on each cgo call, is still correct too.
1694 // We may have to keep the current version on systems with cgo
1695 // but without pthreads, like Windows.
1697 // CgocallBackDone calls this after releasing p, so no write barriers.
1698 //go:nowritebarrierrec
1699 func dropm() {
1700 // Clear m and g, and return m to the extra list.
1701 // After the call to setg we can only call nosplit functions
1702 // with no pointer manipulation.
1703 mp := getg().m
1705 // Return mp.curg to dead state.
1706 casgstatus(mp.curg, _Gsyscall, _Gdead)
1707 atomic.Xadd(&sched.ngsys, +1)
1709 // Block signals before unminit.
1710 // Unminit unregisters the signal handling stack (but needs g on some systems).
1711 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
1712 // It's important not to try to handle a signal between those two steps.
1713 sigmask := mp.sigmask
1714 sigblock()
1715 unminit()
1717 // gccgo sets the stack to Gdead here, because the splitstack
1718 // context is not initialized.
1719 atomic.Store(&mp.curg.atomicstatus, _Gdead)
1720 mp.curg.gcstack = 0
1721 mp.curg.gcnextsp = 0
1723 mnext := lockextra(true)
1724 extraMCount++
1725 mp.schedlink.set(mnext)
1727 setg(nil)
1729 // Commit the release of mp.
1730 unlockextra(mp)
1732 msigrestore(sigmask)
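// An illustrative sketch of the extra-M lifecycle for a cgo callback on a
// thread not created by Go (simplified; the real sequence is driven by the
// cgo callback path through cgocallbackg):
//
//	needm(0)   // steal an extra m, install g = m.curg, minit, setGContext
//	// ... run the Go callback ...
//	dropm()    // return the m to the extra list, restore the signal mask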
1735 // A helper function for EnsureDropM.
1736 func getm() uintptr {
1737 return uintptr(unsafe.Pointer(getg().m))
1740 var extram uintptr
1741 var extraMCount uint32 // Protected by lockextra
1742 var extraMWaiters uint32
1744 // lockextra locks the extra list and returns the list head.
1745 // The caller must unlock the list by storing a new list head
1746 // to extram. If nilokay is true, then lockextra will
1747 // return a nil list head if that's what it finds. If nilokay is false,
1748 // lockextra will keep waiting until the list head is no longer nil.
1749 //go:nosplit
1750 //go:nowritebarrierrec
1751 func lockextra(nilokay bool) *m {
1752 const locked = 1
1754 incr := false
1755 for {
1756 old := atomic.Loaduintptr(&extram)
1757 if old == locked {
1758 yield := osyield
1759 yield()
1760 continue
1762 if old == 0 && !nilokay {
1763 if !incr {
1764 // Add 1 to the number of threads
1765 // waiting for an M.
1766 // This is cleared by newextram.
1767 atomic.Xadd(&extraMWaiters, 1)
1768 incr = true
1770 usleep(1)
1771 continue
1773 if atomic.Casuintptr(&extram, old, locked) {
1774 return (*m)(unsafe.Pointer(old))
1776 yield := osyield
1777 yield()
1778 continue
1782 //go:nosplit
1783 //go:nowritebarrierrec
1784 func unlockextra(mp *m) {
1785 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
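// An illustrative sketch of the lockextra/unlockextra protocol described
// above; this mirrors what oneNewExtraM and dropm do (simplified):
//
//	mp := lockextra(true)  // extram now holds the "locked" sentinel (1)
//	// ... push or pop an m via mp.schedlink, adjust extraMCount ...
//	unlockextra(mp)        // store a valid list head to release the lock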
1788 // execLock serializes exec and clone to avoid bugs or unspecified behaviour
1789 // around exec'ing while creating/destroying threads. See issue #19546.
1790 var execLock rwmutex
1792 // newmHandoff contains a list of m structures that need new OS threads.
1793 // This is used by newm in situations where newm itself can't safely
1794 // start an OS thread.
1795 var newmHandoff struct {
1796 lock mutex
1798 // newm points to a list of M structures that need new OS
1799 // threads. The list is linked through m.schedlink.
1800 newm muintptr
1802 // waiting indicates that wake needs to be notified when an m
1803 // is put on the list.
1804 waiting bool
1805 wake note
1807 // haveTemplateThread indicates that the templateThread has
1808 // been started. This is not protected by lock. Use cas to set
1809 // to 1.
1810 haveTemplateThread uint32
1813 // Create a new m. It will start off with a call to fn, or else the scheduler.
1814 // fn needs to be static and not a heap allocated closure.
1815 // May run with m.p==nil, so write barriers are not allowed.
1816 //go:nowritebarrierrec
1817 func newm(fn func(), _p_ *p) {
1818 mp, _, _ := allocm(_p_, fn, false)
1819 mp.nextp.set(_p_)
1820 mp.sigmask = initSigmask
1821 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
1822 // We're on a locked M or a thread that may have been
1823 // started by C. The kernel state of this thread may
1824 // be strange (the user may have locked it for that
1825 // purpose). We don't want to clone that into another
1826 // thread. Instead, ask a known-good thread to create
1827 // the thread for us.
1829 // This is disabled on Plan 9. See golang.org/issue/22227.
1831 // TODO: This may be unnecessary on Windows, which
1832 // doesn't model thread creation off fork.
1833 lock(&newmHandoff.lock)
1834 if newmHandoff.haveTemplateThread == 0 {
1835 throw("on a locked thread with no template thread")
1837 mp.schedlink = newmHandoff.newm
1838 newmHandoff.newm.set(mp)
1839 if newmHandoff.waiting {
1840 newmHandoff.waiting = false
1841 notewakeup(&newmHandoff.wake)
1843 unlock(&newmHandoff.lock)
1844 return
1846 newm1(mp)
1849 func newm1(mp *m) {
1850 execLock.rlock() // Prevent process clone.
1851 newosproc(mp)
1852 execLock.runlock()
1855 // startTemplateThread starts the template thread if it is not already
1856 // running.
1858 // The calling thread must itself be in a known-good state.
1859 func startTemplateThread() {
1860 if GOARCH == "wasm" { // no threads on wasm yet
1861 return
1863 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
1864 return
1866 newm(templateThread, nil)
1869 // templateThread is a thread in a known-good state that exists solely
1870 // to start new threads in known-good states when the calling thread
1871 // may not be in a good state.
1873 // Many programs never need this, so templateThread is started lazily
1874 // when we first enter a state that might lead to running on a thread
1875 // in an unknown state.
1877 // templateThread runs on an M without a P, so it must not have write
1878 // barriers.
1880 //go:nowritebarrierrec
1881 func templateThread() {
1882 lock(&sched.lock)
1883 sched.nmsys++
1884 checkdead()
1885 unlock(&sched.lock)
1887 for {
1888 lock(&newmHandoff.lock)
1889 for newmHandoff.newm != 0 {
1890 newm := newmHandoff.newm.ptr()
1891 newmHandoff.newm = 0
1892 unlock(&newmHandoff.lock)
1893 for newm != nil {
1894 next := newm.schedlink.ptr()
1895 newm.schedlink = 0
1896 newm1(newm)
1897 newm = next
1899 lock(&newmHandoff.lock)
1901 newmHandoff.waiting = true
1902 noteclear(&newmHandoff.wake)
1903 unlock(&newmHandoff.lock)
1904 notesleep(&newmHandoff.wake)
1908 // Stops execution of the current m until new work is available.
1909 // Returns with acquired P.
1910 func stopm() {
1911 _g_ := getg()
1913 if _g_.m.locks != 0 {
1914 throw("stopm holding locks")
1916 if _g_.m.p != 0 {
1917 throw("stopm holding p")
1919 if _g_.m.spinning {
1920 throw("stopm spinning")
1923 lock(&sched.lock)
1924 mput(_g_.m)
1925 unlock(&sched.lock)
1926 notesleep(&_g_.m.park)
1927 noteclear(&_g_.m.park)
1928 acquirep(_g_.m.nextp.ptr())
1929 _g_.m.nextp = 0
1932 func mspinning() {
1933 // startm's caller incremented nmspinning. Set the new M's spinning.
1934 getg().m.spinning = true
1937 // Schedules some M to run the p (creates an M if necessary).
1938 // If p==nil, tries to get an idle P; if there are no idle P's, it does nothing.
1939 // May run with m.p==nil, so write barriers are not allowed.
1940 // If spinning is set, the caller has incremented nmspinning and startm will
1941 // either decrement nmspinning or set m.spinning in the newly started M.
1942 //go:nowritebarrierrec
1943 func startm(_p_ *p, spinning bool) {
1944 lock(&sched.lock)
1945 if _p_ == nil {
1946 _p_ = pidleget()
1947 if _p_ == nil {
1948 unlock(&sched.lock)
1949 if spinning {
1950 // The caller incremented nmspinning, but there are no idle Ps,
1951 // so it's okay to just undo the increment and give up.
1952 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1953 throw("startm: negative nmspinning")
1956 return
1959 mp := mget()
1960 unlock(&sched.lock)
1961 if mp == nil {
1962 var fn func()
1963 if spinning {
1964 // The caller incremented nmspinning, so set m.spinning in the new M.
1965 fn = mspinning
1967 newm(fn, _p_)
1968 return
1970 if mp.spinning {
1971 throw("startm: m is spinning")
1973 if mp.nextp != 0 {
1974 throw("startm: m has p")
1976 if spinning && !runqempty(_p_) {
1977 throw("startm: p has runnable gs")
1979 // The caller incremented nmspinning, so set m.spinning in the new M.
1980 mp.spinning = spinning
1981 mp.nextp.set(_p_)
1982 notewakeup(&mp.park)
1985 // Hands off P from syscall or locked M.
1986 // Always runs without a P, so write barriers are not allowed.
1987 //go:nowritebarrierrec
1988 func handoffp(_p_ *p) {
1989 // handoffp must start an M in any situation where
1990 // findrunnable would return a G to run on _p_.
1992 // if it has local work, start it straight away
1993 if !runqempty(_p_) || sched.runqsize != 0 {
1994 startm(_p_, false)
1995 return
1997 // if it has GC work, start it straight away
1998 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
1999 startm(_p_, false)
2000 return
2002 // no local work, check that there are no spinning/idle M's,
2003 // otherwise our help is not required
2004 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
2005 startm(_p_, true)
2006 return
2008 lock(&sched.lock)
2009 if sched.gcwaiting != 0 {
2010 _p_.status = _Pgcstop
2011 sched.stopwait--
2012 if sched.stopwait == 0 {
2013 notewakeup(&sched.stopnote)
2015 unlock(&sched.lock)
2016 return
2018 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
2019 sched.safePointFn(_p_)
2020 sched.safePointWait--
2021 if sched.safePointWait == 0 {
2022 notewakeup(&sched.safePointNote)
2025 if sched.runqsize != 0 {
2026 unlock(&sched.lock)
2027 startm(_p_, false)
2028 return
2030 // If this is the last running P and nobody is polling the network,
2031 // we need to wake up another M to poll the network.
2032 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
2033 unlock(&sched.lock)
2034 startm(_p_, false)
2035 return
2037 pidleput(_p_)
2038 unlock(&sched.lock)
2041 // Tries to add one more P to execute G's.
2042 // Called when a G is made runnable (newproc, ready).
2043 func wakep() {
2044 // be conservative about spinning threads
2045 if !atomic.Cas(&sched.nmspinning, 0, 1) {
2046 return
2048 startm(nil, true)
2051 // Stops execution of the current m that is locked to a g until the g is runnable again.
2052 // Returns with acquired P.
2053 func stoplockedm() {
2054 _g_ := getg()
2056 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2057 throw("stoplockedm: inconsistent locking")
2059 if _g_.m.p != 0 {
2060 // Schedule another M to run this p.
2061 _p_ := releasep()
2062 handoffp(_p_)
2064 incidlelocked(1)
2065 // Wait until another thread schedules lockedg again.
2066 notesleep(&_g_.m.park)
2067 noteclear(&_g_.m.park)
2068 status := readgstatus(_g_.m.lockedg.ptr())
2069 if status&^_Gscan != _Grunnable {
2070 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
2071 dumpgstatus(_g_)
2072 throw("stoplockedm: not runnable")
2074 acquirep(_g_.m.nextp.ptr())
2075 _g_.m.nextp = 0
2078 // Schedules the locked m to run the locked gp.
2079 // May run during STW, so write barriers are not allowed.
2080 //go:nowritebarrierrec
2081 func startlockedm(gp *g) {
2082 _g_ := getg()
2084 mp := gp.lockedm.ptr()
2085 if mp == _g_.m {
2086 throw("startlockedm: locked to me")
2088 if mp.nextp != 0 {
2089 throw("startlockedm: m has p")
2091 // directly handoff current P to the locked m
2092 incidlelocked(-1)
2093 _p_ := releasep()
2094 mp.nextp.set(_p_)
2095 notewakeup(&mp.park)
2096 stopm()
2099 // Stops the current m for stopTheWorld.
2100 // Returns when the world is restarted.
2101 func gcstopm() {
2102 _g_ := getg()
2104 if sched.gcwaiting == 0 {
2105 throw("gcstopm: not waiting for gc")
2107 if _g_.m.spinning {
2108 _g_.m.spinning = false
2109 // OK to just drop nmspinning here,
2110 // startTheWorld will unpark threads as necessary.
2111 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2112 throw("gcstopm: negative nmspinning")
2115 _p_ := releasep()
2116 lock(&sched.lock)
2117 _p_.status = _Pgcstop
2118 sched.stopwait--
2119 if sched.stopwait == 0 {
2120 notewakeup(&sched.stopnote)
2122 unlock(&sched.lock)
2123 stopm()
2126 // Schedules gp to run on the current M.
2127 // If inheritTime is true, gp inherits the remaining time in the
2128 // current time slice. Otherwise, it starts a new time slice.
2129 // Never returns.
2131 // Write barriers are allowed because this is called immediately after
2132 // acquiring a P in several places.
2134 //go:yeswritebarrierrec
2135 func execute(gp *g, inheritTime bool) {
2136 _g_ := getg()
2138 casgstatus(gp, _Grunnable, _Grunning)
2139 gp.waitsince = 0
2140 gp.preempt = false
2141 if !inheritTime {
2142 _g_.m.p.ptr().schedtick++
2144 _g_.m.curg = gp
2145 gp.m = _g_.m
2147 // Check whether the profiler needs to be turned on or off.
2148 hz := sched.profilehz
2149 if _g_.m.profilehz != hz {
2150 setThreadCPUProfiler(hz)
2153 if trace.enabled {
2154 // GoSysExit has to happen when we have a P, but before GoStart.
2155 // So we emit it here.
2156 if gp.syscallsp != 0 && gp.sysblocktraced {
2157 traceGoSysExit(gp.sysexitticks)
2159 traceGoStart()
2162 gogo(gp)
2165 // Finds a runnable goroutine to execute.
2166 // Tries to steal from other P's, get g from global queue, poll network.
2167 func findrunnable() (gp *g, inheritTime bool) {
2168 _g_ := getg()
2170 // The conditions here and in handoffp must agree: if
2171 // findrunnable would return a G to run, handoffp must start
2172 // an M.
2174 top:
2175 _p_ := _g_.m.p.ptr()
2176 if sched.gcwaiting != 0 {
2177 gcstopm()
2178 goto top
2180 if _p_.runSafePointFn != 0 {
2181 runSafePointFn()
2183 if fingwait && fingwake {
2184 if gp := wakefing(); gp != nil {
2185 ready(gp, 0, true)
2188 if *cgo_yield != nil {
2189 asmcgocall(*cgo_yield, nil)
2192 // local runq
2193 if gp, inheritTime := runqget(_p_); gp != nil {
2194 return gp, inheritTime
2197 // global runq
2198 if sched.runqsize != 0 {
2199 lock(&sched.lock)
2200 gp := globrunqget(_p_, 0)
2201 unlock(&sched.lock)
2202 if gp != nil {
2203 return gp, false
2207 // Poll network.
2208 // This netpoll is only an optimization before we resort to stealing.
2209 // We can safely skip it if there are no waiters or a thread is blocked
2210 // in netpoll already. If there is any kind of logical race with that
2211 // blocked thread (e.g. it has already returned from netpoll, but does
2212 // not set lastpoll yet), this thread will do blocking netpoll below
2213 // anyway.
2214 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2215 if list := netpoll(false); !list.empty() { // non-blocking
2216 gp := list.pop()
2217 injectglist(&list)
2218 casgstatus(gp, _Gwaiting, _Grunnable)
2219 if trace.enabled {
2220 traceGoUnpark(gp, 0)
2222 return gp, false
2226 // Steal work from other P's.
2227 procs := uint32(gomaxprocs)
2228 if atomic.Load(&sched.npidle) == procs-1 {
2229 // Either GOMAXPROCS=1 or everybody, except for us, is idle already.
2230 // New work can appear from returning syscall/cgocall, network or timers.
2231 // None of these submit to local run queues, so there is no point in stealing.
2232 goto stop
2234 // If number of spinning M's >= number of busy P's, block.
2235 // This is necessary to prevent excessive CPU consumption
2236 // when GOMAXPROCS>>1 but the program parallelism is low.
2237 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2238 goto stop
2240 if !_g_.m.spinning {
2241 _g_.m.spinning = true
2242 atomic.Xadd(&sched.nmspinning, 1)
2244 for i := 0; i < 4; i++ {
2245 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2246 if sched.gcwaiting != 0 {
2247 goto top
2249 stealRunNextG := i > 2 // first look for ready queues with more than 1 g
2250 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
2251 return gp, false
2256 stop:
2258 // We have nothing to do. If we're in the GC mark phase, can
2259 // safely scan and blacken objects, and have work to do, run
2260 // idle-time marking rather than give up the P.
2261 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
2262 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2263 gp := _p_.gcBgMarkWorker.ptr()
2264 casgstatus(gp, _Gwaiting, _Grunnable)
2265 if trace.enabled {
2266 traceGoUnpark(gp, 0)
2268 return gp, false
2271 // wasm only:
2272 // If a callback returned and no other goroutine is awake,
2273 // then pause execution until a callback was triggered.
2274 if beforeIdle() {
2275 // At least one goroutine got woken.
2276 goto top
2279 // Before we drop our P, make a snapshot of the allp slice,
2280 // which can change underfoot once we no longer block
2281 // safe-points. We don't need to snapshot the contents because
2282 // everything up to cap(allp) is immutable.
2283 allpSnapshot := allp
2285 // return P and block
2286 lock(&sched.lock)
2287 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2288 unlock(&sched.lock)
2289 goto top
2291 if sched.runqsize != 0 {
2292 gp := globrunqget(_p_, 0)
2293 unlock(&sched.lock)
2294 return gp, false
2296 if releasep() != _p_ {
2297 throw("findrunnable: wrong p")
2299 pidleput(_p_)
2300 unlock(&sched.lock)
2302 // Delicate dance: thread transitions from spinning to non-spinning state,
2303 // potentially concurrently with submission of new goroutines. We must
2304 // drop nmspinning first and then check all per-P queues again (with
2305 // #StoreLoad memory barrier in between). If we do it the other way around,
2306 // another thread can submit a goroutine after we've checked all run queues
2307 // but before we drop nmspinning; as a result, nobody will unpark a thread
2308 // to run the goroutine.
2309 // If we discover new work below, we need to restore m.spinning as a signal
2310 // for resetspinning to unpark a new worker thread (because there can be more
2311 // than one starving goroutine). However, if after discovering new work
2312 // we also observe no idle Ps, it is OK to just park the current thread:
2313 // the system is fully loaded so no spinning threads are required.
2314 // Also see "Worker thread parking/unparking" comment at the top of the file.
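// Editor's note (recap, not in the original): the required ordering below is
//	1. clear m.spinning and decrement sched.nmspinning
//	2. (#StoreLoad barrier) re-check every per-P run queue in allpSnapshot and the idle GC work
//	3. if work turns up and this M was spinning, restore m.spinning/nmspinning before running it
// Checking the queues before dropping nmspinning would let a submitter observe a
// "spinning" M, skip waking anyone, and then have this M park anyway.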
2315 wasSpinning := _g_.m.spinning
2316 if _g_.m.spinning {
2317 _g_.m.spinning = false
2318 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2319 throw("findrunnable: negative nmspinning")
2323 // check all runqueues once again
2324 for _, _p_ := range allpSnapshot {
2325 if !runqempty(_p_) {
2326 lock(&sched.lock)
2327 _p_ = pidleget()
2328 unlock(&sched.lock)
2329 if _p_ != nil {
2330 acquirep(_p_)
2331 if wasSpinning {
2332 _g_.m.spinning = true
2333 atomic.Xadd(&sched.nmspinning, 1)
2335 goto top
2337 break
2341 // Check for idle-priority GC work again.
2342 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
2343 lock(&sched.lock)
2344 _p_ = pidleget()
2345 if _p_ != nil && _p_.gcBgMarkWorker == 0 {
2346 pidleput(_p_)
2347 _p_ = nil
2349 unlock(&sched.lock)
2350 if _p_ != nil {
2351 acquirep(_p_)
2352 if wasSpinning {
2353 _g_.m.spinning = true
2354 atomic.Xadd(&sched.nmspinning, 1)
2356 // Go back to idle GC check.
2357 goto stop
2361 // poll network
2362 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2363 if _g_.m.p != 0 {
2364 throw("findrunnable: netpoll with p")
2366 if _g_.m.spinning {
2367 throw("findrunnable: netpoll with spinning")
2369 list := netpoll(true) // block until new work is available
2370 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2371 if !list.empty() {
2372 lock(&sched.lock)
2373 _p_ = pidleget()
2374 unlock(&sched.lock)
2375 if _p_ != nil {
2376 acquirep(_p_)
2377 gp := list.pop()
2378 injectglist(&list)
2379 casgstatus(gp, _Gwaiting, _Grunnable)
2380 if trace.enabled {
2381 traceGoUnpark(gp, 0)
2383 return gp, false
2385 injectglist(&list)
2388 stopm()
2389 goto top
2392 // pollWork reports whether there is non-background work this P could
2393 // be doing. This is a fairly lightweight check to be used for
2394 // background work loops, like idle GC. It checks a subset of the
2395 // conditions checked by the actual scheduler.
2396 func pollWork() bool {
2397 if sched.runqsize != 0 {
2398 return true
2400 p := getg().m.p.ptr()
2401 if !runqempty(p) {
2402 return true
2404 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2405 if list := netpoll(false); !list.empty() {
2406 injectglist(&list)
2407 return true
2410 return false
2413 func resetspinning() {
2414 _g_ := getg()
2415 if !_g_.m.spinning {
2416 throw("resetspinning: not a spinning m")
2418 _g_.m.spinning = false
2419 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
2420 if int32(nmspinning) < 0 {
2421 throw("findrunnable: negative nmspinning")
2423 // M wakeup policy is deliberately somewhat conservative, so check if we
2424 // need to wakeup another P here. See "Worker thread parking/unparking"
2425 // comment at the top of the file for details.
2426 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
2427 wakep()
2431 // Injects the list of runnable G's into the scheduler and clears glist.
2432 // Can run concurrently with GC.
2433 func injectglist(glist *gList) {
2434 if glist.empty() {
2435 return
2437 if trace.enabled {
2438 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
2439 traceGoUnpark(gp, 0)
2442 lock(&sched.lock)
2443 var n int
2444 for n = 0; !glist.empty(); n++ {
2445 gp := glist.pop()
2446 casgstatus(gp, _Gwaiting, _Grunnable)
2447 globrunqput(gp)
2449 unlock(&sched.lock)
2450 for ; n != 0 && sched.npidle != 0; n-- {
2451 startm(nil, false)
2453 *glist = gList{}
2456 // One round of scheduler: find a runnable goroutine and execute it.
2457 // Never returns.
2458 func schedule() {
2459 _g_ := getg()
2461 if _g_.m.locks != 0 {
2462 throw("schedule: holding locks")
2465 if _g_.m.lockedg != 0 {
2466 stoplockedm()
2467 execute(_g_.m.lockedg.ptr(), false) // Never returns.
2470 // We should not schedule away from a g that is executing a cgo call,
2471 // since the cgo call is using the m's g0 stack.
2472 if _g_.m.incgo {
2473 throw("schedule: in cgo")
2476 top:
2477 if sched.gcwaiting != 0 {
2478 gcstopm()
2479 goto top
2481 if _g_.m.p.ptr().runSafePointFn != 0 {
2482 runSafePointFn()
2485 var gp *g
2486 var inheritTime bool
2488 // Normal goroutines will check for the need to call wakep in ready,
2489 // but GCworkers and tracereaders will not, so the check must
2490 // be done here instead.
2491 tryWakeP := false
2492 if trace.enabled || trace.shutdown {
2493 gp = traceReader()
2494 if gp != nil {
2495 casgstatus(gp, _Gwaiting, _Grunnable)
2496 traceGoUnpark(gp, 0)
2497 tryWakeP = true
2500 if gp == nil && gcBlackenEnabled != 0 {
2501 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
2502 tryWakeP = tryWakeP || gp != nil
2504 if gp == nil {
2505 // Check the global runnable queue once in a while to ensure fairness.
2506 // Otherwise two goroutines can completely occupy the local runqueue
2507 // by constantly respawning each other.
2508 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
2509 lock(&sched.lock)
2510 gp = globrunqget(_g_.m.p.ptr(), 1)
2511 unlock(&sched.lock)
2514 if gp == nil {
2515 gp, inheritTime = runqget(_g_.m.p.ptr())
2516 if gp != nil && _g_.m.spinning {
2517 throw("schedule: spinning with local work")
2520 // Because gccgo does not implement preemption as a stack check,
2521 // we need to check for preemption here for fairness.
2522 // Otherwise goroutines on the local queue may starve
2523 // goroutines on the global queue.
2524 // Since we preempt by storing the goroutine on the global
2525 // queue, this is the only place we need to check preempt.
2526 // This does not call checkPreempt because gp is not running.
2527 if gp != nil && gp.preempt {
2528 gp.preempt = false
2529 lock(&sched.lock)
2530 globrunqput(gp)
2531 unlock(&sched.lock)
2532 goto top
2535 if gp == nil {
2536 gp, inheritTime = findrunnable() // blocks until work is available
2539 // This thread is going to run a goroutine and is not spinning anymore,
2540 // so if it was marked as spinning we need to reset it now and potentially
2541 // start a new spinning M.
2542 if _g_.m.spinning {
2543 resetspinning()
2546 if sched.disable.user && !schedEnabled(gp) {
2547 // Scheduling of this goroutine is disabled. Put it on
2548 // the list of pending runnable goroutines for when we
2549 // re-enable user scheduling and look again.
2550 lock(&sched.lock)
2551 if schedEnabled(gp) {
2552 // Something re-enabled scheduling while we
2553 // were acquiring the lock.
2554 unlock(&sched.lock)
2555 } else {
2556 sched.disable.runnable.pushBack(gp)
2557 sched.disable.n++
2558 unlock(&sched.lock)
2559 goto top
2563 // If we are about to schedule a non-normal goroutine (a GCworker or tracereader),
2564 // wake a P if there is one.
2565 if tryWakeP {
2566 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
2567 wakep()
2570 if gp.lockedm != 0 {
2571 // Hands off own p to the locked m,
2572 // then blocks waiting for a new p.
2573 startlockedm(gp)
2574 goto top
2577 execute(gp, inheritTime)
2580 // dropg removes the association between m and the current goroutine m->curg (gp for short).
2581 // Typically a caller sets gp's status away from Grunning and then
2582 // immediately calls dropg to finish the job. The caller is also responsible
2583 // for arranging that gp will be restarted using ready at an
2584 // appropriate time. After calling dropg and arranging for gp to be
2585 // readied later, the caller can do other work but eventually should
2586 // call schedule to restart the scheduling of goroutines on this m.
2587 func dropg() {
2588 _g_ := getg()
2590 setMNoWB(&_g_.m.curg.m, nil)
2591 setGNoWB(&_g_.m.curg, nil)
2594 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
2595 unlock((*mutex)(lock))
2596 return true
2599 // park continuation on g0.
2600 func park_m(gp *g) {
2601 _g_ := getg()
2603 if trace.enabled {
2604 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
2607 dropg()
2608 casgstatus(gp, _Grunning, _Gwaiting)
2610 if fn := _g_.m.waitunlockf; fn != nil {
2611 ok := fn(gp, _g_.m.waitlock)
2612 _g_.m.waitunlockf = nil
2613 _g_.m.waitlock = nil
2614 if !ok {
2615 if trace.enabled {
2616 traceGoUnpark(gp, 2)
2618 casgstatus(gp, _Gwaiting, _Grunnable)
2619 execute(gp, true) // Schedule it back, never returns.
2622 schedule()
2625 func goschedImpl(gp *g) {
2626 status := readgstatus(gp)
2627 if status&^_Gscan != _Grunning {
2628 dumpgstatus(gp)
2629 throw("bad g status")
2631 dropg()
2632 casgstatus(gp, _Grunning, _Grunnable)
2633 lock(&sched.lock)
2634 globrunqput(gp)
2635 unlock(&sched.lock)
2637 schedule()
2640 // Gosched continuation on g0.
2641 func gosched_m(gp *g) {
2642 if trace.enabled {
2643 traceGoSched()
2645 goschedImpl(gp)
2648 // goschedguarded is a forbidden-states-avoided version of gosched_m
2649 func goschedguarded_m(gp *g) {
2651 if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning {
2652 gogo(gp) // never return
2655 if trace.enabled {
2656 traceGoSched()
2658 goschedImpl(gp)
2661 func gopreempt_m(gp *g) {
2662 if trace.enabled {
2663 traceGoPreempt()
2665 goschedImpl(gp)
2668 // Finishes execution of the current goroutine.
2669 func goexit1() {
2670 if trace.enabled {
2671 traceGoEnd()
2673 mcall(goexit0)
2676 // goexit continuation on g0.
2677 func goexit0(gp *g) {
2678 _g_ := getg()
2680 casgstatus(gp, _Grunning, _Gdead)
2681 if isSystemGoroutine(gp, false) {
2682 atomic.Xadd(&sched.ngsys, -1)
2683 gp.isSystemGoroutine = false
2685 gp.m = nil
2686 locked := gp.lockedm != 0
2687 gp.lockedm = 0
2688 _g_.m.lockedg = 0
2689 gp.entry = nil
2690 gp.paniconfault = false
2691 gp._defer = nil // should be nil already, but just in case.
2692 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
2693 gp.writebuf = nil
2694 gp.waitreason = 0
2695 gp.param = nil
2696 gp.labels = nil
2697 gp.timer = nil
2699 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
2700 // Flush assist credit to the global pool. This gives
2701 // better information to pacing if the application is
2702 // rapidly creating and exiting goroutines.
2703 scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
2704 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
2705 gp.gcAssistBytes = 0
2708 // Note that gp's stack scan is now "valid" because it has no
2709 // stack.
2710 gp.gcscanvalid = true
2711 dropg()
2713 if GOARCH == "wasm" { // no threads yet on wasm
2714 gfput(_g_.m.p.ptr(), gp)
2715 schedule() // never returns
2718 if _g_.m.lockedInt != 0 {
2719 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
2720 throw("internal lockOSThread error")
2722 gfput(_g_.m.p.ptr(), gp)
2723 if locked {
2724 // The goroutine may have locked this thread because
2725 // it put it in an unusual kernel state. Kill it
2726 // rather than returning it to the thread pool.
2728 // Return to mstart, which will release the P and exit
2729 // the thread.
2730 if GOOS != "plan9" { // See golang.org/issue/22227.
2731 _g_.m.exiting = true
2732 gogo(_g_.m.g0)
2733 } else {
2734 // Clear lockedExt on plan9 since we may end up re-using
2735 // this thread.
2736 _g_.m.lockedExt = 0
2739 schedule()
2742 // The goroutine g is about to enter a system call.
2743 // Record that it's not using the cpu anymore.
2744 // This is called only from the go syscall library and cgocall,
2745 // not from the low-level system calls used by the runtime.
2747 // The entersyscall function is written in C, so that it can save the
2748 // current registers so that the GC will see them.
2749 // It calls reentersyscall.
2751 // Syscall tracing:
2752 // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
2753 // If the syscall does not block, that is it, we do not emit any other events.
2754 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
2755 // when syscall returns we emit traceGoSysExit and when the goroutine starts running
2756 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
2757 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
2758 // we remember the current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
2759 // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
2760 // and we wait for the increment before emitting traceGoSysExit.
2761 // Note that the increment is done even if tracing is not enabled,
2762 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
2764 //go:nosplit
2765 //go:noinline
2766 func reentersyscall(pc, sp uintptr) {
2767 _g_ := getg()
2769 // Disable preemption because during this function g is in Gsyscall status,
2770 // but can have inconsistent g->sched, do not let GC observe it.
2771 _g_.m.locks++
2773 _g_.syscallsp = sp
2774 _g_.syscallpc = pc
2775 casgstatus(_g_, _Grunning, _Gsyscall)
2777 if trace.enabled {
2778 systemstack(traceGoSysCall)
2781 if atomic.Load(&sched.sysmonwait) != 0 {
2782 systemstack(entersyscall_sysmon)
2785 if _g_.m.p.ptr().runSafePointFn != 0 {
2786 // runSafePointFn may stack split if run on this stack
2787 systemstack(runSafePointFn)
2790 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2791 _g_.sysblocktraced = true
2792 _g_.m.mcache = nil
2793 pp := _g_.m.p.ptr()
2794 pp.m = 0
2795 _g_.m.oldp.set(pp)
2796 _g_.m.p = 0
2797 atomic.Store(&pp.status, _Psyscall)
2798 if sched.gcwaiting != 0 {
2799 systemstack(entersyscall_gcwait)
2802 _g_.m.locks--
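// Editor's note (recap, not in the original): the syscalltick handshake described
// above reentersyscall, in rough pseudocode:
//	enter:  m.syscalltick = p.syscalltick             // remember the tick
//	block:  traceGoSysBlock(p); p.syscalltick++       // whoever retakes or hands off p
//	exit:   for p.syscalltick == m.syscalltick { osyield() }
//	        traceGoSysExit(...)
// so a trace can never show the exit before the corresponding block event.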
2805 func entersyscall_sysmon() {
2806 lock(&sched.lock)
2807 if atomic.Load(&sched.sysmonwait) != 0 {
2808 atomic.Store(&sched.sysmonwait, 0)
2809 notewakeup(&sched.sysmonnote)
2811 unlock(&sched.lock)
2814 func entersyscall_gcwait() {
2815 _g_ := getg()
2816 _p_ := _g_.m.oldp.ptr()
2818 lock(&sched.lock)
2819 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
2820 if trace.enabled {
2821 traceGoSysBlock(_p_)
2822 traceProcStop(_p_)
2824 _p_.syscalltick++
2825 if sched.stopwait--; sched.stopwait == 0 {
2826 notewakeup(&sched.stopnote)
2829 unlock(&sched.lock)
2832 func reentersyscallblock(pc, sp uintptr) {
2833 _g_ := getg()
2835 _g_.m.locks++ // see comment in entersyscall
2836 _g_.throwsplit = true
2837 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2838 _g_.sysblocktraced = true
2839 _g_.m.p.ptr().syscalltick++
2841 // Leave SP around for GC and traceback.
2842 _g_.syscallsp = sp
2843 _g_.syscallpc = pc
2844 casgstatus(_g_, _Grunning, _Gsyscall)
2845 systemstack(entersyscallblock_handoff)
2847 _g_.m.locks--
2850 func entersyscallblock_handoff() {
2851 if trace.enabled {
2852 traceGoSysCall()
2853 traceGoSysBlock(getg().m.p.ptr())
2855 handoffp(releasep())
2858 // The goroutine g exited its system call.
2859 // Arrange for it to run on a cpu again.
2860 // This is called only from the go syscall library, not
2861 // from the low-level system calls used by the runtime.
2863 // Write barriers are not allowed because our P may have been stolen.
2865 //go:nosplit
2866 //go:nowritebarrierrec
2867 func exitsyscall() {
2868 _g_ := getg()
2870 _g_.m.locks++ // see comment in entersyscall
2872 _g_.waitsince = 0
2873 oldp := _g_.m.oldp.ptr()
2874 _g_.m.oldp = 0
2875 if exitsyscallfast(oldp) {
2876 if _g_.m.mcache == nil {
2877 throw("lost mcache")
2879 if trace.enabled {
2880 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2881 systemstack(traceGoStart)
2884 // There's a cpu for us, so we can run.
2885 _g_.m.p.ptr().syscalltick++
2886 // We need to cas the status and scan before resuming...
2887 casgstatus(_g_, _Gsyscall, _Grunning)
2889 exitsyscallclear(_g_)
2890 _g_.m.locks--
2891 _g_.throwsplit = false
2893 // Check preemption, since unlike gc we don't check on
2894 // every call.
2895 if getg().preempt {
2896 checkPreempt()
2898 _g_.throwsplit = false
2900 if sched.disable.user && !schedEnabled(_g_) {
2901 // Scheduling of this goroutine is disabled.
2902 Gosched()
2905 return
2908 _g_.sysexitticks = 0
2909 if trace.enabled {
2910 // Wait till traceGoSysBlock event is emitted.
2911 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2912 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
2913 osyield()
2915 // We can't trace syscall exit right now because we don't have a P.
2916 // Tracing code can invoke write barriers that cannot run without a P.
2917 // So instead we remember the syscall exit time and emit the event
2918 // in execute when we have a P.
2919 _g_.sysexitticks = cputicks()
2922 _g_.m.locks--
2924 // Call the scheduler.
2925 mcall(exitsyscall0)
2927 if _g_.m.mcache == nil {
2928 throw("lost mcache")
2931 // Scheduler returned, so we're allowed to run now.
2932 // Delete the syscallsp information that we left for
2933 // the garbage collector during the system call.
2934 // Must wait until now because until gosched returns
2935 // we don't know for sure that the garbage collector
2936 // is not running.
2937 exitsyscallclear(_g_)
2939 _g_.m.p.ptr().syscalltick++
2940 _g_.throwsplit = false
2943 //go:nosplit
2944 func exitsyscallfast(oldp *p) bool {
2945 _g_ := getg()
2947 // Freezetheworld sets stopwait but does not retake P's.
2948 if sched.stopwait == freezeStopWait {
2949 return false
2952 // Try to re-acquire the last P.
2953 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
2954 // There's a cpu for us, so we can run.
2955 wirep(oldp)
2956 exitsyscallfast_reacquired()
2957 return true
2960 // Try to get any other idle P.
2961 if sched.pidle != 0 {
2962 var ok bool
2963 systemstack(func() {
2964 ok = exitsyscallfast_pidle()
2965 if ok && trace.enabled {
2966 if oldp != nil {
2967 // Wait till traceGoSysBlock event is emitted.
2968 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2969 for oldp.syscalltick == _g_.m.syscalltick {
2970 osyield()
2973 traceGoSysExit(0)
2976 if ok {
2977 return true
2980 return false
2983 // exitsyscallfast_reacquired is the exitsyscall path on which this G
2984 // has successfully reacquired the P it was running on before the
2985 // syscall.
2987 //go:nosplit
2988 func exitsyscallfast_reacquired() {
2989 _g_ := getg()
2990 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2991 if trace.enabled {
2992 // The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed).
2993 // traceGoSysBlock for this syscall was already emitted,
2994 // but here we effectively retake the p from the new syscall running on the same p.
2995 systemstack(func() {
2996 // Denote blocking of the new syscall.
2997 traceGoSysBlock(_g_.m.p.ptr())
2998 // Denote completion of the current syscall.
2999 traceGoSysExit(0)
3002 _g_.m.p.ptr().syscalltick++
3006 func exitsyscallfast_pidle() bool {
3007 lock(&sched.lock)
3008 _p_ := pidleget()
3009 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
3010 atomic.Store(&sched.sysmonwait, 0)
3011 notewakeup(&sched.sysmonnote)
3013 unlock(&sched.lock)
3014 if _p_ != nil {
3015 acquirep(_p_)
3016 return true
3018 return false
3021 // exitsyscall slow path on g0.
3022 // Failed to acquire P, enqueue gp as runnable.
3024 //go:nowritebarrierrec
3025 func exitsyscall0(gp *g) {
3026 _g_ := getg()
3028 casgstatus(gp, _Gsyscall, _Gexitingsyscall)
3029 dropg()
3030 casgstatus(gp, _Gexitingsyscall, _Grunnable)
3031 lock(&sched.lock)
3032 var _p_ *p
3033 if schedEnabled(_g_) {
3034 _p_ = pidleget()
3036 if _p_ == nil {
3037 globrunqput(gp)
3038 } else if atomic.Load(&sched.sysmonwait) != 0 {
3039 atomic.Store(&sched.sysmonwait, 0)
3040 notewakeup(&sched.sysmonnote)
3042 unlock(&sched.lock)
3043 if _p_ != nil {
3044 acquirep(_p_)
3045 execute(gp, false) // Never returns.
3047 if _g_.m.lockedg != 0 {
3048 // Wait until another thread schedules gp and so m again.
3049 stoplockedm()
3050 execute(gp, false) // Never returns.
3052 stopm()
3053 schedule() // Never returns.
3056 // exitsyscallclear clears GC-related information that we only track
3057 // during a syscall.
3058 func exitsyscallclear(gp *g) {
3059 // Garbage collector isn't running (since we are), so okay to
3060 // clear syscallsp.
3061 gp.syscallsp = 0
3063 gp.gcstack = 0
3064 gp.gcnextsp = 0
3065 memclrNoHeapPointers(unsafe.Pointer(&gp.gcregs), unsafe.Sizeof(gp.gcregs))
3068 // Code generated by cgo, and some library code, calls syscall.Entersyscall
3069 // and syscall.Exitsyscall.
3071 //go:linkname syscall_entersyscall syscall.Entersyscall
3072 //go:nosplit
3073 func syscall_entersyscall() {
3074 entersyscall()
3077 //go:linkname syscall_exitsyscall syscall.Exitsyscall
3078 //go:nosplit
3079 func syscall_exitsyscall() {
3080 exitsyscall()
3083 func beforefork() {
3084 gp := getg().m.curg
3086 // Block signals during a fork, so that the child does not run
3087 // a signal handler before exec if a signal is sent to the process
3088 // group. See issue #18600.
3089 gp.m.locks++
3090 msigsave(gp.m)
3091 sigblock()
3094 // Called from syscall package before fork.
3095 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
3096 //go:nosplit
3097 func syscall_runtime_BeforeFork() {
3098 systemstack(beforefork)
3101 func afterfork() {
3102 gp := getg().m.curg
3104 msigrestore(gp.m.sigmask)
3106 gp.m.locks--
3109 // Called from syscall package after fork in parent.
3110 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
3111 //go:nosplit
3112 func syscall_runtime_AfterFork() {
3113 systemstack(afterfork)
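// Editor's note (illustrative, not in the original): these hooks are only expected
// to be called from the syscall package around fork/exec. The call order, with
// rawFork/rawExec standing in as hypothetical names for the raw system calls, is
// roughly:
//	syscall_runtime_BeforeFork()               // parent: block signals, bump locks
//	pid := rawFork()
//	if pid == 0 {
//		syscall_runtime_AfterForkInChild() // child: reset handlers, restore mask
//		rawExec(...)
//	} else {
//		syscall_runtime_AfterFork()        // parent: restore mask, drop locks
//	}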
3116 // inForkedChild is true while manipulating signals in the child process.
3117 // This is used to avoid calling libc functions in case we are using vfork.
3118 var inForkedChild bool
3120 // Called from syscall package after fork in child.
3121 // It resets non-sigignored signals to the default handler, and
3122 // restores the signal mask in preparation for the exec.
3124 // Because this might be called during a vfork, and therefore may be
3125 // temporarily sharing address space with the parent process, this must
3126 // not change any global variables or call into C code that may do so.
3128 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
3129 //go:nosplit
3130 //go:nowritebarrierrec
3131 func syscall_runtime_AfterForkInChild() {
3132 // It's OK to change the global variable inForkedChild here
3133 // because we are going to change it back. There is no race here,
3134 // because if we are sharing address space with the parent process,
3135 // then the parent process can not be running concurrently.
3136 inForkedChild = true
3138 clearSignalHandlers()
3140 // When we are the child we are the only thread running,
3141 // so we know that nothing else has changed gp.m.sigmask.
3142 msigrestore(getg().m.sigmask)
3144 inForkedChild = false
3147 // Called from syscall package before Exec.
3148 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
3149 func syscall_runtime_BeforeExec() {
3150 // Prevent thread creation during exec.
3151 execLock.lock()
3154 // Called from syscall package after Exec.
3155 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
3156 func syscall_runtime_AfterExec() {
3157 execLock.unlock()
3160 // panicgonil is used for gccgo as we need to use a compiler check for
3161 // a nil func, in case we have to build a thunk.
3162 //go:linkname panicgonil
3163 func panicgonil() {
3164 getg().m.throwing = -1 // do not dump full stacks
3165 throw("go of nil func value")
3168 // Create a new g running fn passing arg as the single argument.
3169 // Put it on the queue of g's waiting to run.
3170 // The compiler turns a go statement into a call to this.
3171 //go:linkname newproc __go_go
3172 func newproc(fn uintptr, arg unsafe.Pointer) *g {
3173 _g_ := getg()
3175 if fn == 0 {
3176 _g_.m.throwing = -1 // do not dump full stacks
3177 throw("go of nil func value")
3179 acquirem() // disable preemption because it can be holding p in a local var
3181 _p_ := _g_.m.p.ptr()
3182 newg := gfget(_p_)
3183 var (
3184 sp unsafe.Pointer
3185 spsize uintptr
3187 if newg == nil {
3188 newg = malg(true, false, &sp, &spsize)
3189 casgstatus(newg, _Gidle, _Gdead)
3190 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
3191 } else {
3192 resetNewG(newg, &sp, &spsize)
3194 newg.traceback = 0
3196 if readgstatus(newg) != _Gdead {
3197 throw("newproc1: new g is not Gdead")
3200 // Store the C function pointer into entryfn, take the address
3201 // of entryfn, convert it to a Go function value, and store
3202 // that in entry.
3203 newg.entryfn = fn
3204 var entry func(unsafe.Pointer)
3205 *(*unsafe.Pointer)(unsafe.Pointer(&entry)) = unsafe.Pointer(&newg.entryfn)
3206 newg.entry = entry
3208 newg.param = arg
3209 newg.gopc = getcallerpc()
3210 newg.ancestors = saveAncestors(_g_)
3211 newg.startpc = fn
3212 if _g_.m.curg != nil {
3213 newg.labels = _g_.m.curg.labels
3215 if isSystemGoroutine(newg, false) {
3216 atomic.Xadd(&sched.ngsys, +1)
3218 newg.gcscanvalid = false
3219 casgstatus(newg, _Gdead, _Grunnable)
3221 if _p_.goidcache == _p_.goidcacheend {
3222 // Sched.goidgen is the last allocated id,
3223 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
3224 // At startup sched.goidgen=0, so main goroutine receives goid=1.
3225 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
3226 _p_.goidcache -= _GoidCacheBatch - 1
3227 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
3229 newg.goid = int64(_p_.goidcache)
3230 _p_.goidcache++
3231 if trace.enabled {
3232 traceGoCreate(newg, newg.startpc)
3235 makeGContext(newg, sp, spsize)
3237 runqput(_p_, newg, true)
3239 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
3240 wakep()
3242 releasem(_g_.m)
3243 return newg
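// Editor's note (illustrative, not in the original): for a statement such as
//	go fn(x)
// the compiler emits, in effect, a call to this function (linknamed __go_go):
// fn arrives as a raw code pointer, often a thunk that unpacks the captured
// arguments (see setSystemGoroutine below), and arg is the single pointer handed
// to that thunk. Roughly:
//	newproc(<code pointer of fn or its thunk>, <pointer to the packaged arguments>)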
3246 // expectedSystemGoroutines counts the number of goroutines expected
3247 // to mark themselves as system goroutines. After they mark themselves
3248 // by calling setSystemGoroutine, this is decremented. NumGoroutines
3249 // uses this to wait for all system goroutines to mark themselves
3250 // before it counts them.
3251 var expectedSystemGoroutines uint32
3253 // expectSystemGoroutine is called when starting a goroutine that will
3254 // call setSystemGoroutine. It increments expectedSystemGoroutines.
3255 func expectSystemGoroutine() {
3256 atomic.Xadd(&expectedSystemGoroutines, +1)
3259 // waitForSystemGoroutines waits for all currently expected system
3260 // goroutines to register themselves.
3261 func waitForSystemGoroutines() {
3262 for atomic.Load(&expectedSystemGoroutines) > 0 {
3263 Gosched()
3264 osyield()
3268 // setSystemGoroutine marks this goroutine as a "system goroutine".
3269 // In the gc toolchain this is done by comparing startpc to a list of
3270 // saved special PCs. In gccgo that approach does not work as startpc
3271 // is often a thunk that invokes the real function with arguments,
3272 // so the thunk address never matches the saved special PCs. Instead,
3273 // since there are only a limited number of "system goroutines",
3274 // we force each one to mark itself as special.
3275 func setSystemGoroutine() {
3276 getg().isSystemGoroutine = true
3277 atomic.Xadd(&sched.ngsys, +1)
3278 atomic.Xadd(&expectedSystemGoroutines, -1)
3281 // saveAncestors copies previous ancestors of the given caller g and
3282 // includes info for the current caller in a new set of tracebacks for
3283 // a g being created.
3284 func saveAncestors(callergp *g) *[]ancestorInfo {
3285 // Copy all prior info, except for the root goroutine (goid 0).
3286 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
3287 return nil
3289 var callerAncestors []ancestorInfo
3290 if callergp.ancestors != nil {
3291 callerAncestors = *callergp.ancestors
3293 n := int32(len(callerAncestors)) + 1
3294 if n > debug.tracebackancestors {
3295 n = debug.tracebackancestors
3297 ancestors := make([]ancestorInfo, n)
3298 copy(ancestors[1:], callerAncestors)
3300 var pcs [_TracebackMaxFrames]uintptr
3301 // FIXME: This should get a traceback of callergp.
3302 // npcs := gcallers(callergp, 0, pcs[:])
3303 npcs := 0
3304 ipcs := make([]uintptr, npcs)
3305 copy(ipcs, pcs[:])
3306 ancestors[0] = ancestorInfo{
3307 pcs: ipcs,
3308 goid: callergp.goid,
3309 gopc: callergp.gopc,
3312 ancestorsp := new([]ancestorInfo)
3313 *ancestorsp = ancestors
3314 return ancestorsp
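// Editor's note (illustrative, not in the original): debug.tracebackancestors is
// normally enabled through the GODEBUG environment variable, for example
//	GODEBUG=tracebackancestors=5 ./prog
// which asks tracebacks to record up to 5 ancestor goroutine creation sites.
// As the FIXME above notes, gccgo currently records an empty PC slice for each
// ancestor, so only the goid and gopc are reported.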
3317 // Put on gfree list.
3318 // If local list is too long, transfer a batch to the global list.
3319 func gfput(_p_ *p, gp *g) {
3320 if readgstatus(gp) != _Gdead {
3321 throw("gfput: bad status (not Gdead)")
3324 _p_.gFree.push(gp)
3325 _p_.gFree.n++
3326 if _p_.gFree.n >= 64 {
3327 lock(&sched.gFree.lock)
3328 for _p_.gFree.n >= 32 {
3329 _p_.gFree.n--
3330 gp = _p_.gFree.pop()
3331 sched.gFree.list.push(gp)
3332 sched.gFree.n++
3334 unlock(&sched.gFree.lock)
3338 // Get from gfree list.
3339 // If local list is empty, grab a batch from global list.
3340 func gfget(_p_ *p) *g {
3341 retry:
3342 if _p_.gFree.empty() && !sched.gFree.list.empty() {
3343 lock(&sched.gFree.lock)
3344 // Move a batch of free Gs to the P.
3345 for _p_.gFree.n < 32 {
3346 gp := sched.gFree.list.pop()
3347 if gp == nil {
3348 break
3350 sched.gFree.n--
3351 _p_.gFree.push(gp)
3352 _p_.gFree.n++
3354 unlock(&sched.gFree.lock)
3355 goto retry
3357 gp := _p_.gFree.pop()
3358 if gp == nil {
3359 return nil
3361 _p_.gFree.n--
3362 return gp
3365 // Purge all cached G's from gfree list to the global list.
3366 func gfpurge(_p_ *p) {
3367 lock(&sched.gFree.lock)
3368 for !_p_.gFree.empty() {
3369 gp := _p_.gFree.pop()
3370 _p_.gFree.n--
3371 sched.gFree.list.push(gp)
3372 sched.gFree.n++
3374 unlock(&sched.gFree.lock)
3377 // Breakpoint executes a breakpoint trap.
3378 func Breakpoint() {
3379 breakpoint()
3382 // dolockOSThread is called by LockOSThread and lockOSThread below
3383 // after they modify m.locked. Do not allow preemption during this call,
3384 // or else the m might be different in this function than in the caller.
3385 //go:nosplit
3386 func dolockOSThread() {
3387 if GOARCH == "wasm" {
3388 return // no threads on wasm yet
3390 _g_ := getg()
3391 _g_.m.lockedg.set(_g_)
3392 _g_.lockedm.set(_g_.m)
3395 //go:nosplit
3397 // LockOSThread wires the calling goroutine to its current operating system thread.
3398 // The calling goroutine will always execute in that thread,
3399 // and no other goroutine will execute in it,
3400 // until the calling goroutine has made as many calls to
3401 // UnlockOSThread as to LockOSThread.
3402 // If the calling goroutine exits without unlocking the thread,
3403 // the thread will be terminated.
3405 // All init functions are run on the startup thread. Calling LockOSThread
3406 // from an init function will cause the main function to be invoked on
3407 // that thread.
3409 // A goroutine should call LockOSThread before calling OS services or
3410 // non-Go library functions that depend on per-thread state.
3411 func LockOSThread() {
3412 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
3413 // If we need to start a new thread from the locked
3414 // thread, we need the template thread. Start it now
3415 // while we're in a known-good state.
3416 startTemplateThread()
3418 _g_ := getg()
3419 _g_.m.lockedExt++
3420 if _g_.m.lockedExt == 0 {
3421 _g_.m.lockedExt--
3422 panic("LockOSThread nesting overflow")
3424 dolockOSThread()
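// Editor's sketch (not part of the original file): minimal user-level usage of
// LockOSThread/UnlockOSThread for code that depends on per-thread state, such as
// a C library using thread-local storage. withThreadLocalState is a hypothetical
// name; in user code the calls are runtime.LockOSThread and runtime.UnlockOSThread.
//	func withThreadLocalState() {
//		runtime.LockOSThread()
//		defer runtime.UnlockOSThread()
//		// ... everything in here runs on the same OS thread ...
//	}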
3427 //go:nosplit
3428 func lockOSThread() {
3429 getg().m.lockedInt++
3430 dolockOSThread()
3433 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
3434 // after they update m->locked. Do not allow preemption during this call,
3435 // or else the m might be different in this function than in the caller.
3436 //go:nosplit
3437 func dounlockOSThread() {
3438 if GOARCH == "wasm" {
3439 return // no threads on wasm yet
3441 _g_ := getg()
3442 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
3443 return
3445 _g_.m.lockedg = 0
3446 _g_.lockedm = 0
3449 //go:nosplit
3451 // UnlockOSThread undoes an earlier call to LockOSThread.
3452 // If this drops the number of active LockOSThread calls on the
3453 // calling goroutine to zero, it unwires the calling goroutine from
3454 // its fixed operating system thread.
3455 // If there are no active LockOSThread calls, this is a no-op.
3457 // Before calling UnlockOSThread, the caller must ensure that the OS
3458 // thread is suitable for running other goroutines. If the caller made
3459 // any permanent changes to the state of the thread that would affect
3460 // other goroutines, it should not call this function and thus leave
3461 // the goroutine locked to the OS thread until the goroutine (and
3462 // hence the thread) exits.
3463 func UnlockOSThread() {
3464 _g_ := getg()
3465 if _g_.m.lockedExt == 0 {
3466 return
3468 _g_.m.lockedExt--
3469 dounlockOSThread()
3472 //go:nosplit
3473 func unlockOSThread() {
3474 _g_ := getg()
3475 if _g_.m.lockedInt == 0 {
3476 systemstack(badunlockosthread)
3478 _g_.m.lockedInt--
3479 dounlockOSThread()
3482 func badunlockosthread() {
3483 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
3486 func gcount() int32 {
3487 n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
3488 for _, _p_ := range allp {
3489 n -= _p_.gFree.n
3492 // All these variables can be changed concurrently, so the result can be inconsistent.
3493 // But at least the current goroutine is running.
3494 if n < 1 {
3495 n = 1
3497 return n
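// Editor's note (illustrative, not in the original): gcount is the value behind
// the exported runtime.NumGoroutine, so the user-visible count excludes system
// goroutines and free Gs exactly as computed here, e.g.
//	println("goroutines:", runtime.NumGoroutine())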
3500 func mcount() int32 {
3501 return int32(sched.mnext - sched.nmfreed)
3504 var prof struct {
3505 signalLock uint32
3506 hz int32
3509 func _System() { _System() }
3510 func _ExternalCode() { _ExternalCode() }
3511 func _LostExternalCode() { _LostExternalCode() }
3512 func _GC() { _GC() }
3513 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
3514 func _VDSO() { _VDSO() }
3516 var _SystemPC = funcPC(_System)
3517 var _ExternalCodePC = funcPC(_ExternalCode)
3518 var _LostExternalCodePC = funcPC(_LostExternalCode)
3519 var _GCPC = funcPC(_GC)
3520 var _LostSIGPROFDuringAtomic64PC = funcPC(_LostSIGPROFDuringAtomic64)
3522 // Called if we receive a SIGPROF signal.
3523 // Called by the signal handler, may run during STW.
3524 //go:nowritebarrierrec
3525 func sigprof(pc uintptr, gp *g, mp *m) {
3526 if prof.hz == 0 {
3527 return
3530 // Profiling runs concurrently with GC, so it must not allocate.
3531 // Set a trap in case the code does allocate.
3532 // Note that on windows, one thread takes profiles of all the
3533 // other threads, so mp is usually not getg().m.
3534 // In fact mp may not even be stopped.
3535 // See golang.org/issue/17165.
3536 getg().m.mallocing++
3538 traceback := true
3540 // If SIGPROF arrived while already fetching runtime callers
3541 // we can have trouble on older systems because the unwind
3542 // library calls dl_iterate_phdr which was not reentrant in
3543 // the past. alreadyInCallers checks for that.
3544 if gp == nil || alreadyInCallers() {
3545 traceback = false
3548 var stk [maxCPUProfStack]uintptr
3549 n := 0
3550 if traceback {
3551 var stklocs [maxCPUProfStack]location
3552 n = callers(0, stklocs[:])
3554 // Issue 26595: the stack trace we've just collected is going
3555 // to include frames that we don't want to report in the CPU
3556 // profile, including signal handler frames. Here is what we
3557 // might typically see at the point of "callers" above for a
3558 // signal delivered to the application routine "interesting"
3559 // called by "main".
3561 // 0: runtime.sigprof
3562 // 1: runtime.sighandler
3563 // 2: runtime.sigtrampgo
3564 // 3: runtime.sigtramp
3565 // 4: <signal handler called>
3566 // 5: main.interesting_routine
3567 // 6: main.main
3569 // To ensure a sane profile, walk through the frames in
3570 // "stklocs" until we find the "runtime.sigtramp" frame, then
3571 // report only those frames below the frame one down from
3572 // that. On systems that don't split stack, "sigtramp" can
3573 // do a sibling call to "sigtrampgo", so use "sigtrampgo"
3574 // if we don't find "sigtramp". If for some reason
3575 // neither "runtime.sigtramp" nor "runtime.sigtrampgo" is
3576 // present, don't make any changes.
3577 framesToDiscard := 0
3578 for i := 0; i < n; i++ {
3579 if stklocs[i].function == "runtime.sigtrampgo" && i+2 < n {
3580 framesToDiscard = i + 2
3582 if stklocs[i].function == "runtime.sigtramp" && i+2 < n {
3583 framesToDiscard = i + 2
3584 break
3587 n -= framesToDiscard
3588 for i := 0; i < n; i++ {
3589 stk[i] = stklocs[i+framesToDiscard].pc
3593 if n <= 0 {
3594 // Normal traceback is impossible or has failed.
3595 // Account it against abstract "System" or "GC".
3596 n = 2
3597 stk[0] = pc
3598 if mp.preemptoff != "" {
3599 stk[1] = _GCPC + sys.PCQuantum
3600 } else {
3601 stk[1] = _SystemPC + sys.PCQuantum
3605 if prof.hz != 0 {
3606 cpuprof.add(gp, stk[:n])
3608 getg().m.mallocing--
3611 // Use global arrays rather than using up lots of stack space in the
3612 // signal handler. This is safe since while we are executing a SIGPROF
3613 // signal other SIGPROF signals are blocked.
3614 var nonprofGoStklocs [maxCPUProfStack]location
3615 var nonprofGoStk [maxCPUProfStack]uintptr
3617 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
3618 // and the signal handler collected a stack trace in sigprofCallers.
3619 // When this is called, sigprofCallersUse will be non-zero.
3620 // g is nil, and what we can do is very limited.
3621 //go:nosplit
3622 //go:nowritebarrierrec
3623 func sigprofNonGo(pc uintptr) {
3624 if prof.hz != 0 {
3625 n := callers(0, nonprofGoStklocs[:])
3627 for i := 0; i < n; i++ {
3628 nonprofGoStk[i] = nonprofGoStklocs[i].pc
3631 if n <= 0 {
3632 n = 2
3633 nonprofGoStk[0] = pc
3634 nonprofGoStk[1] = _ExternalCodePC + sys.PCQuantum
3637 cpuprof.addNonGo(nonprofGoStk[:n])
3641 // sigprofNonGoPC is called when a profiling signal arrived on a
3642 // non-Go thread and we have a single PC value, not a stack trace.
3643 // g is nil, and what we can do is very limited.
3644 //go:nosplit
3645 //go:nowritebarrierrec
3646 func sigprofNonGoPC(pc uintptr) {
3647 if prof.hz != 0 {
3648 stk := []uintptr{
3650 _ExternalCodePC + sys.PCQuantum,
3652 cpuprof.addNonGo(stk)
3656 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
3657 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
3658 func setcpuprofilerate(hz int32) {
3659 // Force sane arguments.
3660 if hz < 0 {
3661 hz = 0
3664 // Disable preemption, otherwise we can be rescheduled to another thread
3665 // that has profiling enabled.
3666 _g_ := getg()
3667 _g_.m.locks++
3669 // Stop profiler on this thread so that it is safe to lock prof.
3670 // If a profiling signal came in while we had prof locked,
3671 // it would deadlock.
3672 setThreadCPUProfiler(0)
3674 for !atomic.Cas(&prof.signalLock, 0, 1) {
3675 osyield()
3677 if prof.hz != hz {
3678 setProcessCPUProfiler(hz)
3679 prof.hz = hz
3681 atomic.Store(&prof.signalLock, 0)
3683 lock(&sched.lock)
3684 sched.profilehz = hz
3685 unlock(&sched.lock)
3687 if hz != 0 {
3688 setThreadCPUProfiler(hz)
3691 _g_.m.locks--
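// Editor's note (illustrative, not in the original): user code normally reaches
// this path through runtime/pprof rather than the runtime directly, e.g.
//	f, _ := os.Create("cpu.pprof")
//	pprof.StartCPUProfile(f)     // ends up here with hz = 100, pprof's default rate
//	defer pprof.StopCPUProfile() // ends up here with hz = 0
// Error handling is omitted for brevity.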
3694 // init initializes pp, which may be a freshly allocated p or a
3695 // previously destroyed p, and transitions it to status _Pgcstop.
3696 func (pp *p) init(id int32) {
3697 pp.id = id
3698 pp.status = _Pgcstop
3699 pp.sudogcache = pp.sudogbuf[:0]
3700 pp.deferpool = pp.deferpoolbuf[:0]
3701 pp.wbBuf.reset()
3702 if pp.mcache == nil {
3703 if id == 0 {
3704 if getg().m.mcache == nil {
3705 throw("missing mcache?")
3707 pp.mcache = getg().m.mcache // bootstrap
3708 } else {
3709 pp.mcache = allocmcache()
3712 if raceenabled && pp.raceprocctx == 0 {
3713 if id == 0 {
3714 pp.raceprocctx = raceprocctx0
3715 raceprocctx0 = 0 // bootstrap
3716 } else {
3717 pp.raceprocctx = raceproccreate()
3722 // destroy releases all of the resources associated with pp and
3723 // transitions it to status _Pdead.
3725 // sched.lock must be held and the world must be stopped.
3726 func (pp *p) destroy() {
3727 // Move all runnable goroutines to the global queue
3728 for pp.runqhead != pp.runqtail {
3729 // Pop from tail of local queue
3730 pp.runqtail--
3731 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
3732 // Push onto head of global queue
3733 globrunqputhead(gp)
3735 if pp.runnext != 0 {
3736 globrunqputhead(pp.runnext.ptr())
3737 pp.runnext = 0
3739 // If there's a background worker, make it runnable and put
3740 // it on the global queue so it can clean itself up.
3741 if gp := pp.gcBgMarkWorker.ptr(); gp != nil {
3742 casgstatus(gp, _Gwaiting, _Grunnable)
3743 if trace.enabled {
3744 traceGoUnpark(gp, 0)
3746 globrunqput(gp)
3747 // This assignment doesn't race because the
3748 // world is stopped.
3749 pp.gcBgMarkWorker.set(nil)
3751 // Flush p's write barrier buffer.
3752 if gcphase != _GCoff {
3753 wbBufFlush1(pp)
3754 pp.gcw.dispose()
3756 for i := range pp.sudogbuf {
3757 pp.sudogbuf[i] = nil
3759 pp.sudogcache = pp.sudogbuf[:0]
3760 for i := range pp.deferpoolbuf {
3761 pp.deferpoolbuf[i] = nil
3763 pp.deferpool = pp.deferpoolbuf[:0]
3764 freemcache(pp.mcache)
3765 pp.mcache = nil
3766 gfpurge(pp)
3767 traceProcFree(pp)
3768 if raceenabled {
3769 raceprocdestroy(pp.raceprocctx)
3770 pp.raceprocctx = 0
3772 pp.gcAssistTime = 0
3773 pp.status = _Pdead
3776 // Change number of processors. The world is stopped, sched is locked.
3777 // gcworkbufs are not being modified by either the GC or
3778 // the write barrier code.
3779 // Returns the list of Ps with local work; they need to be scheduled by the caller.
3780 func procresize(nprocs int32) *p {
3781 old := gomaxprocs
3782 if old < 0 || nprocs <= 0 {
3783 throw("procresize: invalid arg")
3785 if trace.enabled {
3786 traceGomaxprocs(nprocs)
3789 // update statistics
3790 now := nanotime()
3791 if sched.procresizetime != 0 {
3792 sched.totaltime += int64(old) * (now - sched.procresizetime)
3794 sched.procresizetime = now
3796 // Grow allp if necessary.
3797 if nprocs > int32(len(allp)) {
3798 // Synchronize with retake, which could be running
3799 // concurrently since it doesn't run on a P.
3800 lock(&allpLock)
3801 if nprocs <= int32(cap(allp)) {
3802 allp = allp[:nprocs]
3803 } else {
3804 nallp := make([]*p, nprocs)
3805 // Copy everything up to allp's cap so we
3806 // never lose old allocated Ps.
3807 copy(nallp, allp[:cap(allp)])
3808 allp = nallp
3810 unlock(&allpLock)
3813 // initialize new P's
3814 for i := old; i < nprocs; i++ {
3815 pp := allp[i]
3816 if pp == nil {
3817 pp = new(p)
3819 pp.init(i)
3820 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
3823 _g_ := getg()
3824 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
3825 // continue to use the current P
3826 _g_.m.p.ptr().status = _Prunning
3827 _g_.m.p.ptr().mcache.prepareForSweep()
3828 } else {
3829 // release the current P and acquire allp[0].
3831 // We must do this before destroying our current P
3832 // because p.destroy itself has write barriers, so we
3833 // need to do that from a valid P.
3834 if _g_.m.p != 0 {
3835 if trace.enabled {
3836 // Pretend that we were descheduled
3837 // and then scheduled again to keep
3838 // the trace sane.
3839 traceGoSched()
3840 traceProcStop(_g_.m.p.ptr())
3842 _g_.m.p.ptr().m = 0
3844 _g_.m.p = 0
3845 _g_.m.mcache = nil
3846 p := allp[0]
3847 p.m = 0
3848 p.status = _Pidle
3849 acquirep(p)
3850 if trace.enabled {
3851 traceGoStart()
3855 // release resources from unused P's
3856 for i := nprocs; i < old; i++ {
3857 p := allp[i]
3858 p.destroy()
3859 // can't free P itself because it can be referenced by an M in syscall
3862 // Trim allp.
3863 if int32(len(allp)) != nprocs {
3864 lock(&allpLock)
3865 allp = allp[:nprocs]
3866 unlock(&allpLock)
3869 var runnablePs *p
3870 for i := nprocs - 1; i >= 0; i-- {
3871 p := allp[i]
3872 if _g_.m.p.ptr() == p {
3873 continue
3875 p.status = _Pidle
3876 if runqempty(p) {
3877 pidleput(p)
3878 } else {
3879 p.m.set(mget())
3880 p.link.set(runnablePs)
3881 runnablePs = p
3884 stealOrder.reset(uint32(nprocs))
3885 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
3886 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
3887 return runnablePs
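// Illustrative sketch, not part of proc.go: user code reaches procresize
// indirectly through runtime.GOMAXPROCS, which stops the world, applies the
// new processor count, and restarts the world (the exact stop-the-world
// sequencing is assumed here, not shown above).
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		prev := runtime.GOMAXPROCS(4) // grow or shrink the set of Ps to 4
//		fmt.Println("previous GOMAXPROCS:", prev)
//		fmt.Println("current GOMAXPROCS:", runtime.GOMAXPROCS(0)) // 0 only queries
//	}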
3890 // Associate p and the current m.
3892 // This function is allowed to have write barriers even if the caller
3893 // isn't because it immediately acquires _p_.
3895 //go:yeswritebarrierrec
3896 func acquirep(_p_ *p) {
3897 // Do the part that isn't allowed to have write barriers.
3898 wirep(_p_)
3900 // Have p; write barriers now allowed.
3902 // Perform deferred mcache flush before this P can allocate
3903 // from a potentially stale mcache.
3904 _p_.mcache.prepareForSweep()
3906 if trace.enabled {
3907 traceProcStart()
3911 // wirep is the first step of acquirep, which actually associates the
3912 // current M to _p_. This is broken out so we can disallow write
3913 // barriers for this part, since we don't yet have a P.
3915 //go:nowritebarrierrec
3916 //go:nosplit
3917 func wirep(_p_ *p) {
3918 _g_ := getg()
3920 if _g_.m.p != 0 || _g_.m.mcache != nil {
3921 throw("wirep: already in go")
3923 if _p_.m != 0 || _p_.status != _Pidle {
3924 id := int64(0)
3925 if _p_.m != 0 {
3926 id = _p_.m.ptr().id
3928 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
3929 throw("wirep: invalid p state")
3931 _g_.m.mcache = _p_.mcache
3932 _g_.m.p.set(_p_)
3933 _p_.m.set(_g_.m)
3934 _p_.status = _Prunning
3937 // Disassociate p and the current m.
3938 func releasep() *p {
3939 _g_ := getg()
3941 if _g_.m.p == 0 || _g_.m.mcache == nil {
3942 throw("releasep: invalid arg")
3944 _p_ := _g_.m.p.ptr()
3945 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
3946 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
3947 throw("releasep: invalid p state")
3949 if trace.enabled {
3950 traceProcStop(_g_.m.p.ptr())
3952 _g_.m.p = 0
3953 _g_.m.mcache = nil
3954 _p_.m = 0
3955 _p_.status = _Pidle
3956 return _p_
3959 func incidlelocked(v int32) {
3960 lock(&sched.lock)
3961 sched.nmidlelocked += v
3962 if v > 0 {
3963 checkdead()
3965 unlock(&sched.lock)
3968 // Check for deadlock situation.
3969 // The check is based on the number of running M's; if it is 0, the system is deadlocked.
3970 // sched.lock must be held.
3971 func checkdead() {
3972 // For -buildmode=c-shared or -buildmode=c-archive it's OK if
3973 // there are no running goroutines. The calling program is
3974 // assumed to be running.
3975 if islibrary || isarchive {
3976 return
3979 // If we are dying because of a signal caught on an already idle thread,
3980 // freezetheworld will cause all running threads to block.
3981 // And runtime will essentially enter into deadlock state,
3982 // except that there is a thread that will call exit soon.
3983 if panicking > 0 {
3984 return
3987 // If we are not running under cgo, but we have an extra M then account
3988 // for it. (It is possible to have an extra M on Windows without cgo to
3989 // accommodate callbacks created by syscall.NewCallback. See issue #6751
3990 // for details.)
3991 var run0 int32
3992 if !iscgo && cgoHasExtraM {
3993 mp := lockextra(true)
3994 haveExtraM := extraMCount > 0
3995 unlockextra(mp)
3996 if haveExtraM {
3997 run0 = 1
4001 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
4002 if run > run0 {
4003 return
4005 if run < 0 {
4006 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
4007 throw("checkdead: inconsistent counts")
4010 grunning := 0
4011 lock(&allglock)
4012 for i := 0; i < len(allgs); i++ {
4013 gp := allgs[i]
4014 if isSystemGoroutine(gp, false) {
4015 continue
4017 s := readgstatus(gp)
4018 switch s &^ _Gscan {
4019 case _Gwaiting:
4020 grunning++
4021 case _Grunnable,
4022 _Grunning,
4023 _Gsyscall:
4024 unlock(&allglock)
4025 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
4026 throw("checkdead: runnable g")
4029 unlock(&allglock)
4030 if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
4031 throw("no goroutines (main called runtime.Goexit) - deadlock!")
4034 // Maybe jump time forward for playground.
4035 gp := timejump()
4036 if gp != nil {
4037 casgstatus(gp, _Gwaiting, _Grunnable)
4038 globrunqput(gp)
4039 _p_ := pidleget()
4040 if _p_ == nil {
4041 throw("checkdead: no p for timer")
4043 mp := mget()
4044 if mp == nil {
4045 // There should always be a free M since
4046 // nothing is running.
4047 throw("checkdead: no m for timer")
4049 mp.nextp.set(_p_)
4050 notewakeup(&mp.park)
4051 return
4054 getg().m.throwing = -1 // do not dump full stacks
4055 throw("all goroutines are asleep - deadlock!")
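// Illustrative sketch, not part of proc.go: a program that trips checkdead.
// Every user goroutine (here, only main) blocks forever, the count of
// running M's drops to zero, and the runtime throws
// "all goroutines are asleep - deadlock!".
//
//	package main
//
//	func main() {
//		ch := make(chan int) // unbuffered channel with no sender
//		<-ch                 // main blocks; nothing else is runnable
//	}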
4058 // forcegcperiod is the maximum time in nanoseconds between garbage
4059 // collections. If we go this long without a garbage collection, one
4060 // is forced to run.
4062 // This is a variable for testing purposes. It normally doesn't change.
4063 var forcegcperiod int64 = 2 * 60 * 1e9
4065 // Always runs without a P, so write barriers are not allowed.
4067 //go:nowritebarrierrec
4068 func sysmon() {
4069 lock(&sched.lock)
4070 sched.nmsys++
4071 checkdead()
4072 unlock(&sched.lock)
4074 lasttrace := int64(0)
4075 idle := 0 // how many cycles in succession we have not woken anybody up
4076 delay := uint32(0)
4077 for {
4078 if idle == 0 { // start with 20us sleep...
4079 delay = 20
4080 } else if idle > 50 { // start doubling the sleep after 1ms...
4081 delay *= 2
4083 if delay > 10*1000 { // up to 10ms
4084 delay = 10 * 1000
4086 usleep(delay)
4087 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
4088 lock(&sched.lock)
4089 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
4090 atomic.Store(&sched.sysmonwait, 1)
4091 unlock(&sched.lock)
4092 // Make wake-up period small enough
4093 // for the sampling to be correct.
4094 maxsleep := forcegcperiod / 2
4095 shouldRelax := true
4096 if osRelaxMinNS > 0 {
4097 next := timeSleepUntil()
4098 now := nanotime()
4099 if next-now < osRelaxMinNS {
4100 shouldRelax = false
4103 if shouldRelax {
4104 osRelax(true)
4106 notetsleep(&sched.sysmonnote, maxsleep)
4107 if shouldRelax {
4108 osRelax(false)
4110 lock(&sched.lock)
4111 atomic.Store(&sched.sysmonwait, 0)
4112 noteclear(&sched.sysmonnote)
4113 idle = 0
4114 delay = 20
4116 unlock(&sched.lock)
4118 // trigger libc interceptors if needed
4119 if *cgo_yield != nil {
4120 asmcgocall(*cgo_yield, nil)
4122 // poll network if not polled for more than 10ms
4123 lastpoll := int64(atomic.Load64(&sched.lastpoll))
4124 now := nanotime()
4125 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
4126 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
4127 list := netpoll(false) // non-blocking - returns list of goroutines
4128 if !list.empty() {
4129 // Need to decrement number of idle locked M's
4130 // (pretending that one more is running) before injectglist.
4131 // Otherwise it can lead to the following situation:
4132 // injectglist grabs all P's but before it starts M's to run the P's,
4133 // another M returns from syscall, finishes running its G,
4134 // observes that there is no work to do and no other running M's
4135 // and reports deadlock.
4136 incidlelocked(-1)
4137 injectglist(&list)
4138 incidlelocked(1)
4141 // retake P's blocked in syscalls
4142 // and preempt long running G's
4143 if retake(now) != 0 {
4144 idle = 0
4145 } else {
4146 idle++
4148 // check if we need to force a GC
4149 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
4150 lock(&forcegc.lock)
4151 forcegc.idle = 0
4152 var list gList
4153 list.push(forcegc.g)
4154 injectglist(&list)
4155 unlock(&forcegc.lock)
4157 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
4158 lasttrace = now
4159 schedtrace(debug.scheddetail > 0)
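// Illustrative sketch, not part of proc.go: sysmon's sleep backoff in
// isolation. The delay starts at 20us, doubles once sysmon has been idle for
// more than 50 consecutive cycles, and is capped at 10ms. The helper name
// sysmonDelay is ours.
//
//	func sysmonDelay(idle int, delay uint32) uint32 {
//		if idle == 0 {
//			return 20 // restart at 20us as soon as sysmon found work
//		}
//		if idle > 50 {
//			delay *= 2
//		}
//		if delay > 10*1000 {
//			delay = 10 * 1000 // never sleep longer than 10ms
//		}
//		return delay
//	}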
4164 type sysmontick struct {
4165 schedtick uint32
4166 schedwhen int64
4167 syscalltick uint32
4168 syscallwhen int64
4171 // forcePreemptNS is the time slice given to a G before it is
4172 // preempted.
4173 const forcePreemptNS = 10 * 1000 * 1000 // 10ms
4175 func retake(now int64) uint32 {
4176 n := 0
4177 // Prevent allp slice changes. This lock will be completely
4178 // uncontended unless we're already stopping the world.
4179 lock(&allpLock)
4180 // We can't use a range loop over allp because we may
4181 // temporarily drop the allpLock. Hence, we need to re-fetch
4182 // allp each time around the loop.
4183 for i := 0; i < len(allp); i++ {
4184 _p_ := allp[i]
4185 if _p_ == nil {
4186 // This can happen if procresize has grown
4187 // allp but not yet created new Ps.
4188 continue
4190 pd := &_p_.sysmontick
4191 s := _p_.status
4192 sysretake := false
4193 if s == _Prunning || s == _Psyscall {
4194 // Preempt G if it's running for too long.
4195 t := int64(_p_.schedtick)
4196 if int64(pd.schedtick) != t {
4197 pd.schedtick = uint32(t)
4198 pd.schedwhen = now
4199 } else if pd.schedwhen+forcePreemptNS <= now {
4200 preemptone(_p_)
4201 // In case of syscall, preemptone() doesn't
4202 // work, because there is no M wired to P.
4203 sysretake = true
4206 if s == _Psyscall {
4207 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
4208 t := int64(_p_.syscalltick)
4209 if !sysretake && int64(pd.syscalltick) != t {
4210 pd.syscalltick = uint32(t)
4211 pd.syscallwhen = now
4212 continue
4214 // On the one hand we don't want to retake Ps if there is no other work to do,
4215 // but on the other hand we want to retake them eventually
4216 // because they can prevent the sysmon thread from deep sleep.
4217 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
4218 continue
4220 // Drop allpLock so we can take sched.lock.
4221 unlock(&allpLock)
4222 // Need to decrement number of idle locked M's
4223 // (pretending that one more is running) before the CAS.
4224 // Otherwise the M from which we retake can exit the syscall,
4225 // increment nmidle and report deadlock.
4226 incidlelocked(-1)
4227 if atomic.Cas(&_p_.status, s, _Pidle) {
4228 if trace.enabled {
4229 traceGoSysBlock(_p_)
4230 traceProcStop(_p_)
4233 _p_.syscalltick++
4234 handoffp(_p_)
4236 incidlelocked(1)
4237 lock(&allpLock)
4240 unlock(&allpLock)
4241 return uint32(n)
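// Illustrative sketch, not part of proc.go: the syscall-retake condition from
// the loop above as a standalone predicate (the helper name is ours). A P
// parked in a syscall is left alone only while its run queue is empty, some
// other M is spinning or idle, and the syscall is younger than 10ms.
//
//	func shouldRetakeSyscallP(runqEmpty bool, spinningPlusIdle uint32, syscallWhen, now int64) bool {
//		const window = 10 * 1000 * 1000 // 10ms in nanoseconds
//		if runqEmpty && spinningPlusIdle > 0 && syscallWhen+window > now {
//			return false // no pressure yet; leave the P parked in the syscall
//		}
//		return true
//	}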
4244 // Tell all goroutines that they have been preempted and they should stop.
4245 // This function is purely best-effort. It can fail to inform a goroutine if a
4246 // processor just started running it.
4247 // No locks need to be held.
4248 // Returns true if preemption request was issued to at least one goroutine.
4249 func preemptall() bool {
4250 res := false
4251 for _, _p_ := range allp {
4252 if _p_.status != _Prunning {
4253 continue
4255 if preemptone(_p_) {
4256 res = true
4259 return res
4262 // Tell the goroutine running on processor P to stop.
4263 // This function is purely best-effort. It can incorrectly fail to inform the
4264 // goroutine. It can inform the wrong goroutine. Even if it informs the
4265 // correct goroutine, that goroutine might ignore the request if it is
4266 // simultaneously executing newstack.
4267 // No lock needs to be held.
4268 // Returns true if preemption request was issued.
4269 // The actual preemption will happen at some point in the future
4270 // and will be indicated by the gp->status no longer being
4271 // Grunning
4272 func preemptone(_p_ *p) bool {
4273 mp := _p_.m.ptr()
4274 if mp == nil || mp == getg().m {
4275 return false
4277 gp := mp.curg
4278 if gp == nil || gp == mp.g0 {
4279 return false
4282 gp.preempt = true
4284 // At this point the gc implementation sets gp.stackguard0 to
4285 // a value that causes the goroutine to suspend itself.
4286 // gccgo has no support for this, and it's hard to support.
4287 // The split stack code reads a value from its TCB.
4288 // We have no way to set a value in the TCB of a different thread.
4289 // And, of course, not all systems support split stack anyhow.
4290 // Checking the field in the g is expensive, since it requires
4291 // loading the g from TLS. The best mechanism is likely to be
4292 // setting a global variable and figuring out a way to efficiently
4293 // check that global variable.
4295 // For now we check gp.preempt in schedule, mallocgc, selectgo,
4296 // and a few other places, which is at least better than doing
4297 // nothing at all.
4299 return true
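// Illustrative sketch, not part of proc.go: because this preemption is
// cooperative, a loop that never reaches one of the checked points (schedule,
// mallocgc, selectgo, ...) can hold on to its P for a long time. An explicit
// yield is the classic user-level workaround; assumes import "runtime", and
// the function name spin is ours.
//
//	func spin() {
//		for i := 0; i < 1000000000; i++ {
//			if i%1000000 == 0 {
//				runtime.Gosched() // voluntarily give up the P so other Gs can run
//			}
//			// ... pure computation with no function calls ...
//		}
//	}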
4302 var starttime int64
4304 func schedtrace(detailed bool) {
4305 now := nanotime()
4306 if starttime == 0 {
4307 starttime = now
4310 lock(&sched.lock)
4311 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
4312 if detailed {
4313 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
4315 // We must be careful while reading data from P's, M's and G's.
4316 // Even if we hold schedlock, most data can be changed concurrently.
4317 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
4318 for i, _p_ := range allp {
4319 mp := _p_.m.ptr()
4320 h := atomic.Load(&_p_.runqhead)
4321 t := atomic.Load(&_p_.runqtail)
4322 if detailed {
4323 id := int64(-1)
4324 if mp != nil {
4325 id = mp.id
4327 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, "\n")
4328 } else {
4329 // In non-detailed mode format lengths of per-P run queues as:
4330 // [len1 len2 len3 len4]
4331 print(" ")
4332 if i == 0 {
4333 print("[")
4335 print(t - h)
4336 if i == len(allp)-1 {
4337 print("]\n")
4342 if !detailed {
4343 unlock(&sched.lock)
4344 return
4347 for mp := allm; mp != nil; mp = mp.alllink {
4348 _p_ := mp.p.ptr()
4349 gp := mp.curg
4350 lockedg := mp.lockedg.ptr()
4351 id1 := int32(-1)
4352 if _p_ != nil {
4353 id1 = _p_.id
4355 id2 := int64(-1)
4356 if gp != nil {
4357 id2 = gp.goid
4359 id3 := int64(-1)
4360 if lockedg != nil {
4361 id3 = lockedg.goid
4363 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
4366 lock(&allglock)
4367 for gi := 0; gi < len(allgs); gi++ {
4368 gp := allgs[gi]
4369 mp := gp.m
4370 lockedm := gp.lockedm.ptr()
4371 id1 := int64(-1)
4372 if mp != nil {
4373 id1 = mp.id
4375 id2 := int64(-1)
4376 if lockedm != nil {
4377 id2 = lockedm.id
4379 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
4381 unlock(&allglock)
4382 unlock(&sched.lock)
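// Illustrative sketch, not part of proc.go: schedtrace is driven from the
// environment rather than from code, e.g.
//
//	GODEBUG=schedtrace=1000 ./prog               // one summary line per second
//	GODEBUG=schedtrace=1000,scheddetail=1 ./prog // plus per-P, per-M, per-G detail
//
// A non-detailed line printed by the code above looks roughly like:
//
//	SCHED 2013ms: gomaxprocs=4 idleprocs=2 threads=7 spinningthreads=0 idlethreads=3 runqueue=1 [0 2 0 0]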
4385 // schedEnableUser enables or disables the scheduling of user
4386 // goroutines.
4388 // This does not stop already running user goroutines, so the caller
4389 // should first stop the world when disabling user goroutines.
4390 func schedEnableUser(enable bool) {
4391 lock(&sched.lock)
4392 if sched.disable.user == !enable {
4393 unlock(&sched.lock)
4394 return
4396 sched.disable.user = !enable
4397 if enable {
4398 n := sched.disable.n
4399 sched.disable.n = 0
4400 globrunqputbatch(&sched.disable.runnable, n)
4401 unlock(&sched.lock)
4402 for ; n != 0 && sched.npidle != 0; n-- {
4403 startm(nil, false)
4405 } else {
4406 unlock(&sched.lock)
4410 // schedEnabled reports whether gp should be scheduled. It returns
4411 // false if scheduling of gp is disabled.
4412 func schedEnabled(gp *g) bool {
4413 if sched.disable.user {
4414 return isSystemGoroutine(gp, true)
4416 return true
4419 // Put mp on midle list.
4420 // Sched must be locked.
4421 // May run during STW, so write barriers are not allowed.
4422 //go:nowritebarrierrec
4423 func mput(mp *m) {
4424 mp.schedlink = sched.midle
4425 sched.midle.set(mp)
4426 sched.nmidle++
4427 checkdead()
4430 // Try to get an m from midle list.
4431 // Sched must be locked.
4432 // May run during STW, so write barriers are not allowed.
4433 //go:nowritebarrierrec
4434 func mget() *m {
4435 mp := sched.midle.ptr()
4436 if mp != nil {
4437 sched.midle = mp.schedlink
4438 sched.nmidle--
4440 return mp
4443 // Put gp on the global runnable queue.
4444 // Sched must be locked.
4445 // May run during STW, so write barriers are not allowed.
4446 //go:nowritebarrierrec
4447 func globrunqput(gp *g) {
4448 sched.runq.pushBack(gp)
4449 sched.runqsize++
4452 // Put gp at the head of the global runnable queue.
4453 // Sched must be locked.
4454 // May run during STW, so write barriers are not allowed.
4455 //go:nowritebarrierrec
4456 func globrunqputhead(gp *g) {
4457 sched.runq.push(gp)
4458 sched.runqsize++
4461 // Put a batch of runnable goroutines on the global runnable queue.
4462 // This clears *batch.
4463 // Sched must be locked.
4464 func globrunqputbatch(batch *gQueue, n int32) {
4465 sched.runq.pushBackAll(*batch)
4466 sched.runqsize += n
4467 *batch = gQueue{}
4470 // Try to get a batch of G's from the global runnable queue.
4471 // Sched must be locked.
4472 func globrunqget(_p_ *p, max int32) *g {
4473 if sched.runqsize == 0 {
4474 return nil
4477 n := sched.runqsize/gomaxprocs + 1
4478 if n > sched.runqsize {
4479 n = sched.runqsize
4481 if max > 0 && n > max {
4482 n = max
4484 if n > int32(len(_p_.runq))/2 {
4485 n = int32(len(_p_.runq)) / 2
4488 sched.runqsize -= n
4490 gp := sched.runq.pop()
4492 for ; n > 0; n-- {
4493 gp1 := sched.runq.pop()
4494 runqput(_p_, gp1, false)
4496 return gp
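// Illustrative sketch, not part of proc.go: the batch-sizing arithmetic from
// globrunqget in isolation (the helper name is ours). Each P claims its
// proportional share plus one, capped by the caller's max and by half the
// local run queue so the local queue cannot overflow.
//
//	func globalBatchSize(runqsize, gomaxprocs, max, localRunqLen int32) int32 {
//		n := runqsize/gomaxprocs + 1
//		if n > runqsize {
//			n = runqsize
//		}
//		if max > 0 && n > max {
//			n = max
//		}
//		if n > localRunqLen/2 {
//			n = localRunqLen / 2
//		}
//		return n
//	}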
4499 // Put p on the _Pidle list.
4500 // Sched must be locked.
4501 // May run during STW, so write barriers are not allowed.
4502 //go:nowritebarrierrec
4503 func pidleput(_p_ *p) {
4504 if !runqempty(_p_) {
4505 throw("pidleput: P has non-empty run queue")
4507 _p_.link = sched.pidle
4508 sched.pidle.set(_p_)
4509 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
4512 // Try to get a p from the _Pidle list.
4513 // Sched must be locked.
4514 // May run during STW, so write barriers are not allowed.
4515 //go:nowritebarrierrec
4516 func pidleget() *p {
4517 _p_ := sched.pidle.ptr()
4518 if _p_ != nil {
4519 sched.pidle = _p_.link
4520 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
4522 return _p_
4525 // runqempty reports whether _p_ has no Gs on its local run queue.
4526 // It never returns true spuriously.
4527 func runqempty(_p_ *p) bool {
4528 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
4529 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
4530 // Simply observing that runqhead == runqtail and then observing that runqnext == nil
4531 // does not mean the queue is empty.
4532 for {
4533 head := atomic.Load(&_p_.runqhead)
4534 tail := atomic.Load(&_p_.runqtail)
4535 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
4536 if tail == atomic.Load(&_p_.runqtail) {
4537 return head == tail && runnext == 0
4542 // To shake out latent assumptions about scheduling order,
4543 // we introduce some randomness into scheduling decisions
4544 // when running with the race detector.
4545 // The need for this was made obvious by changing the
4546 // (deterministic) scheduling order in Go 1.5 and breaking
4547 // many poorly-written tests.
4548 // With the randomness here, as long as the tests pass
4549 // consistently with -race, they shouldn't have latent scheduling
4550 // assumptions.
4551 const randomizeScheduler = raceenabled
4553 // runqput tries to put g on the local runnable queue.
4554 // If next is false, runqput adds g to the tail of the runnable queue.
4555 // If next is true, runqput puts g in the _p_.runnext slot.
4556 // If the run queue is full, runqput puts g on the global queue.
4557 // Executed only by the owner P.
4558 func runqput(_p_ *p, gp *g, next bool) {
4559 if randomizeScheduler && next && fastrand()%2 == 0 {
4560 next = false
4563 if next {
4564 retryNext:
4565 oldnext := _p_.runnext
4566 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
4567 goto retryNext
4569 if oldnext == 0 {
4570 return
4572 // Kick the old runnext out to the regular run queue.
4573 gp = oldnext.ptr()
4576 retry:
4577 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
4578 t := _p_.runqtail
4579 if t-h < uint32(len(_p_.runq)) {
4580 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
4581 atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
4582 return
4584 if runqputslow(_p_, gp, h, t) {
4585 return
4587 // runqputslow found the queue not full; retry the fast path above, which must now succeed
4588 goto retry
4591 // Put g and a batch of work from local runnable queue on global queue.
4592 // Executed only by the owner P.
4593 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
4594 var batch [len(_p_.runq)/2 + 1]*g
4596 // First, grab a batch from local queue.
4597 n := t - h
4598 n = n / 2
4599 if n != uint32(len(_p_.runq)/2) {
4600 throw("runqputslow: queue is not full")
4602 for i := uint32(0); i < n; i++ {
4603 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
4605 if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
4606 return false
4608 batch[n] = gp
4610 if randomizeScheduler {
4611 for i := uint32(1); i <= n; i++ {
4612 j := fastrandn(i + 1)
4613 batch[i], batch[j] = batch[j], batch[i]
4617 // Link the goroutines.
4618 for i := uint32(0); i < n; i++ {
4619 batch[i].schedlink.set(batch[i+1])
4621 var q gQueue
4622 q.head.set(batch[0])
4623 q.tail.set(batch[n])
4625 // Now put the batch on global queue.
4626 lock(&sched.lock)
4627 globrunqputbatch(&q, int32(n+1))
4628 unlock(&sched.lock)
4629 return true
4632 // Get g from local runnable queue.
4633 // If inheritTime is true, gp should inherit the remaining time in the
4634 // current time slice. Otherwise, it should start a new time slice.
4635 // Executed only by the owner P.
4636 func runqget(_p_ *p) (gp *g, inheritTime bool) {
4637 // If there's a runnext, it's the next G to run.
4638 for {
4639 next := _p_.runnext
4640 if next == 0 {
4641 break
4643 if _p_.runnext.cas(next, 0) {
4644 return next.ptr(), true
4648 for {
4649 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
4650 t := _p_.runqtail
4651 if t == h {
4652 return nil, false
4654 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
4655 if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume
4656 return gp, false
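// Illustrative sketch, not part of proc.go: the local run queue is a
// fixed-size ring buffer indexed by free-running head/tail counters, so
// tail-head is the current length and slots are addressed modulo the buffer
// size. A simplified user-level analog (type and method names are ours;
// assumes import "sync/atomic"; no runnext slot and no spill to a global
// queue):
//
//	type ring struct {
//		head uint32 // advanced by consumers with CAS
//		tail uint32 // advanced only by the single owner/producer
//		buf  [256]int
//	}
//
//	func (r *ring) put(v int) bool {
//		h := atomic.LoadUint32(&r.head)
//		t := r.tail // only the owner writes tail
//		if t-h == uint32(len(r.buf)) {
//			return false // full; runqput would push half the queue to the global runq
//		}
//		r.buf[t%uint32(len(r.buf))] = v
//		atomic.StoreUint32(&r.tail, t+1) // publish the new item
//		return true
//	}
//
//	func (r *ring) get() (int, bool) {
//		for {
//			h := atomic.LoadUint32(&r.head)
//			t := atomic.LoadUint32(&r.tail)
//			if h == t {
//				return 0, false // empty
//			}
//			v := r.buf[h%uint32(len(r.buf))]
//			if atomic.CompareAndSwapUint32(&r.head, h, h+1) {
//				return v, true // we won the race for slot h
//			}
//			// lost a race with another consumer; reload and try again
//		}
//	}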
4661 // Grabs a batch of goroutines from _p_'s runnable queue into batch.
4662 // Batch is a ring buffer starting at batchHead.
4663 // Returns number of grabbed goroutines.
4664 // Can be executed by any P.
4665 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
4666 for {
4667 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
4668 t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer
4669 n := t - h
4670 n = n - n/2
4671 if n == 0 {
4672 if stealRunNextG {
4673 // Try to steal from _p_.runnext.
4674 if next := _p_.runnext; next != 0 {
4675 if _p_.status == _Prunning {
4676 // Sleep to ensure that _p_ isn't about to run the g
4677 // we are about to steal.
4678 // The important use case here is when the g running
4679 // on _p_ ready()s another g and then almost
4680 // immediately blocks. Instead of stealing runnext
4681 // in this window, back off to give _p_ a chance to
4682 // schedule runnext. This will avoid thrashing gs
4683 // between different Ps.
4684 // A sync chan send/recv takes ~50ns as of time of
4685 // writing, so 3us gives ~50x overshoot.
4686 if GOOS != "windows" {
4687 usleep(3)
4688 } else {
4689 // On windows system timer granularity is
4690 // 1-15ms, which is way too much for this
4691 // optimization. So just yield.
4692 osyield()
4695 if !_p_.runnext.cas(next, 0) {
4696 continue
4698 batch[batchHead%uint32(len(batch))] = next
4699 return 1
4702 return 0
4704 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
4705 continue
4707 for i := uint32(0); i < n; i++ {
4708 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
4709 batch[(batchHead+i)%uint32(len(batch))] = g
4711 if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
4712 return n
4717 // Steal half of the elements from the local runnable queue of p2
4718 // and put them onto the local runnable queue of p.
4719 // Returns one of the stolen elements (or nil if failed).
4720 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
4721 t := _p_.runqtail
4722 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
4723 if n == 0 {
4724 return nil
4727 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
4728 if n == 0 {
4729 return gp
4731 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
4732 if t-h+n >= uint32(len(_p_.runq)) {
4733 throw("runqsteal: runq overflow")
4735 atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
4736 return gp
4739 // A gQueue is a deque of Gs linked through g.schedlink. A G can only
4740 // be on one gQueue or gList at a time.
4741 type gQueue struct {
4742 head guintptr
4743 tail guintptr
4746 // empty reports whether q is empty.
4747 func (q *gQueue) empty() bool {
4748 return q.head == 0
4751 // push adds gp to the head of q.
4752 func (q *gQueue) push(gp *g) {
4753 gp.schedlink = q.head
4754 q.head.set(gp)
4755 if q.tail == 0 {
4756 q.tail.set(gp)
4760 // pushBack adds gp to the tail of q.
4761 func (q *gQueue) pushBack(gp *g) {
4762 gp.schedlink = 0
4763 if q.tail != 0 {
4764 q.tail.ptr().schedlink.set(gp)
4765 } else {
4766 q.head.set(gp)
4768 q.tail.set(gp)
4771 // pushBackAll adds all Gs in q2 to the tail of q. After this, q2 must
4772 // not be used.
4773 func (q *gQueue) pushBackAll(q2 gQueue) {
4774 if q2.tail == 0 {
4775 return
4777 q2.tail.ptr().schedlink = 0
4778 if q.tail != 0 {
4779 q.tail.ptr().schedlink = q2.head
4780 } else {
4781 q.head = q2.head
4783 q.tail = q2.tail
4786 // pop removes and returns the head of queue q. It returns nil if
4787 // q is empty.
4788 func (q *gQueue) pop() *g {
4789 gp := q.head.ptr()
4790 if gp != nil {
4791 q.head = gp.schedlink
4792 if q.head == 0 {
4793 q.tail = 0
4796 return gp
4799 // popList takes all Gs in q and returns them as a gList.
4800 func (q *gQueue) popList() gList {
4801 stack := gList{q.head}
4802 *q = gQueue{}
4803 return stack
4806 // A gList is a list of Gs linked through g.schedlink. A G can only be
4807 // on one gQueue or gList at a time.
4808 type gList struct {
4809 head guintptr
4812 // empty reports whether l is empty.
4813 func (l *gList) empty() bool {
4814 return l.head == 0
4817 // push adds gp to the head of l.
4818 func (l *gList) push(gp *g) {
4819 gp.schedlink = l.head
4820 l.head.set(gp)
4823 // pushAll prepends all Gs in q to l.
4824 func (l *gList) pushAll(q gQueue) {
4825 if !q.empty() {
4826 q.tail.ptr().schedlink = l.head
4827 l.head = q.head
4831 // pop removes and returns the head of l. If l is empty, it returns nil.
4832 func (l *gList) pop() *g {
4833 gp := l.head.ptr()
4834 if gp != nil {
4835 l.head = gp.schedlink
4837 return gp
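// Illustrative sketch, not part of proc.go: gQueue and gList are intrusive
// lists; the link pointer lives in the element itself (g.schedlink), so
// pushing and popping never allocate. A user-level analog with an explicit
// node type and the same pushBack/pop shape (all names below are ours):
//
//	type node struct {
//		next *node
//		val  int
//	}
//
//	type queue struct {
//		head, tail *node
//	}
//
//	func (q *queue) pushBack(n *node) {
//		n.next = nil
//		if q.tail != nil {
//			q.tail.next = n
//		} else {
//			q.head = n
//		}
//		q.tail = n
//	}
//
//	func (q *queue) pop() *node {
//		n := q.head
//		if n != nil {
//			q.head = n.next
//			if q.head == nil {
//				q.tail = nil
//			}
//		}
//		return n
//	}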
4840 //go:linkname setMaxThreads runtime..z2fdebug.setMaxThreads
4841 func setMaxThreads(in int) (out int) {
4842 lock(&sched.lock)
4843 out = int(sched.maxmcount)
4844 if in > 0x7fffffff { // MaxInt32
4845 sched.maxmcount = 0x7fffffff
4846 } else {
4847 sched.maxmcount = int32(in)
4849 checkmcount()
4850 unlock(&sched.lock)
4851 return
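// Illustrative sketch, not part of proc.go: user code reaches setMaxThreads
// through runtime/debug.SetMaxThreads, which returns the previous limit
// (10000 by default).
//
//	package main
//
//	import (
//		"fmt"
//		"runtime/debug"
//	)
//
//	func main() {
//		prev := debug.SetMaxThreads(20000) // raise the OS thread limit
//		fmt.Println("previous max threads:", prev)
//	}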
4854 func haveexperiment(name string) bool {
4855 // The gofrontend does not support experiments.
4856 return false
4859 //go:nosplit
4860 func procPin() int {
4861 _g_ := getg()
4862 mp := _g_.m
4864 mp.locks++
4865 return int(mp.p.ptr().id)
4868 //go:nosplit
4869 func procUnpin() {
4870 _g_ := getg()
4871 _g_.m.locks--
4874 //go:linkname sync_runtime_procPin sync.runtime_procPin
4875 //go:nosplit
4876 func sync_runtime_procPin() int {
4877 return procPin()
4880 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
4881 //go:nosplit
4882 func sync_runtime_procUnpin() {
4883 procUnpin()
4886 //go:linkname sync_atomic_runtime_procPin sync..z2fatomic.runtime_procPin
4887 //go:nosplit
4888 func sync_atomic_runtime_procPin() int {
4889 return procPin()
4892 //go:linkname sync_atomic_runtime_procUnpin sync..z2fatomic.runtime_procUnpin
4893 //go:nosplit
4894 func sync_atomic_runtime_procUnpin() {
4895 procUnpin()
4898 // Active spinning for sync.Mutex.
4899 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
4900 //go:nosplit
4901 func sync_runtime_canSpin(i int) bool {
4902 // sync.Mutex is cooperative, so we are conservative with spinning.
4903 // Spin only few times and only if running on a multicore machine and
4904 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
4905 // As opposed to runtime mutex we don't do passive spinning here,
4906 // because there can be work on global runq or on other Ps.
4907 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
4908 return false
4910 if p := getg().m.p.ptr(); !runqempty(p) {
4911 return false
4913 return true
4916 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
4917 //go:nosplit
4918 func sync_runtime_doSpin() {
4919 procyield(active_spin_cnt)
4922 var stealOrder randomOrder
4924 // randomOrder/randomEnum are helper types for randomized work stealing.
4925 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
4926 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
4927 // are coprime, then the sequence of (i + X) % GOMAXPROCS values gives the required enumeration.
4928 type randomOrder struct {
4929 count uint32
4930 coprimes []uint32
4933 type randomEnum struct {
4934 i uint32
4935 count uint32
4936 pos uint32
4937 inc uint32
4940 func (ord *randomOrder) reset(count uint32) {
4941 ord.count = count
4942 ord.coprimes = ord.coprimes[:0]
4943 for i := uint32(1); i <= count; i++ {
4944 if gcd(i, count) == 1 {
4945 ord.coprimes = append(ord.coprimes, i)
4950 func (ord *randomOrder) start(i uint32) randomEnum {
4951 return randomEnum{
4952 count: ord.count,
4953 pos: i % ord.count,
4954 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
4958 func (enum *randomEnum) done() bool {
4959 return enum.i == enum.count
4962 func (enum *randomEnum) next() {
4963 enum.i++
4964 enum.pos = (enum.pos + enum.inc) % enum.count
4967 func (enum *randomEnum) position() uint32 {
4968 return enum.pos
4971 func gcd(a, b uint32) uint32 {
4972 for b != 0 {
4973 a, b = b, a%b
4975 return a
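// Illustrative sketch, not part of proc.go: why stepping by a value coprime
// with count visits every position exactly once. With count=6, inc=5 and a
// start of 2, the visited positions are 2, 1, 0, 5, 4, 3: all six, none
// repeated. The helper name enumerate is ours.
//
//	func enumerate(count, start, inc uint32) []uint32 {
//		order := make([]uint32, 0, count)
//		pos := start % count
//		for i := uint32(0); i < count; i++ {
//			order = append(order, pos)
//			pos = (pos + inc) % count
//		}
//		return order
//	}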