runtime: scan register backing store on ia64
[official-gcc.git] / libgo / go / runtime / proc.go
blob2972daa059febc1ff43a065d859fc01f274aafd8
1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime
7 import (
8 "runtime/internal/atomic"
9 "runtime/internal/sys"
10 "unsafe"
13 // Functions called by C code.
14 //go:linkname main runtime.main
15 //go:linkname goparkunlock runtime.goparkunlock
16 //go:linkname newextram runtime.newextram
17 //go:linkname acquirep runtime.acquirep
18 //go:linkname releasep runtime.releasep
19 //go:linkname incidlelocked runtime.incidlelocked
20 //go:linkname schedinit runtime.schedinit
21 //go:linkname ready runtime.ready
22 //go:linkname gcprocs runtime.gcprocs
23 //go:linkname stopm runtime.stopm
24 //go:linkname handoffp runtime.handoffp
25 //go:linkname wakep runtime.wakep
26 //go:linkname stoplockedm runtime.stoplockedm
27 //go:linkname schedule runtime.schedule
28 //go:linkname execute runtime.execute
29 //go:linkname goexit1 runtime.goexit1
30 //go:linkname reentersyscall runtime.reentersyscall
31 //go:linkname reentersyscallblock runtime.reentersyscallblock
32 //go:linkname exitsyscall runtime.exitsyscall
33 //go:linkname gfget runtime.gfget
34 //go:linkname helpgc runtime.helpgc
35 //go:linkname kickoff runtime.kickoff
36 //go:linkname mstart1 runtime.mstart1
37 //go:linkname mexit runtime.mexit
38 //go:linkname globrunqput runtime.globrunqput
39 //go:linkname pidleget runtime.pidleget
41 // Exported for test (see runtime/testdata/testprogcgo/dropm_stub.go).
42 //go:linkname getm runtime.getm
44 // Function called by misc/cgo/test.
45 //go:linkname lockedOSThread runtime.lockedOSThread
47 // C functions for thread and context management.
48 func newosproc(*m)
50 //go:noescape
51 func malg(bool, bool, *unsafe.Pointer, *uintptr) *g
53 //go:noescape
54 func resetNewG(*g, *unsafe.Pointer, *uintptr)
55 func gogo(*g)
56 func setGContext()
57 func makeGContext(*g, unsafe.Pointer, uintptr)
58 func getTraceback(me, gp *g)
59 func gtraceback(*g)
60 func _cgo_notify_runtime_init_done()
61 func alreadyInCallers() bool
62 func stackfree(*g)
64 // Functions created by the compiler.
65 //extern __go_init_main
66 func main_init()
68 //extern main.main
69 func main_main()
71 var buildVersion = sys.TheVersion
73 // Goroutine scheduler
74 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
76 // The main concepts are:
77 // G - goroutine.
78 // M - worker thread, or machine.
79 // P - processor, a resource that is required to execute Go code.
80 // M must have an associated P to execute Go code, however it can be
81 // blocked or in a syscall w/o an associated P.
83 // Design doc at https://golang.org/s/go11sched.
85 // Worker thread parking/unparking.
86 // We need to balance between keeping enough running worker threads to utilize
87 // available hardware parallelism and parking excessive running worker threads
88 // to conserve CPU resources and power. This is not simple for two reasons:
89 // (1) scheduler state is intentionally distributed (in particular, per-P work
90 // queues), so it is not possible to compute global predicates on fast paths;
91 // (2) for optimal thread management we would need to know the future (don't park
92 // a worker thread when a new goroutine will be readied in near future).
94 // Three rejected approaches that would work badly:
95 // 1. Centralize all scheduler state (would inhibit scalability).
96 // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
97 // is a spare P, unpark a thread and handoff it the thread and the goroutine.
98 // This would lead to thread state thrashing, as the thread that readied the
99 // goroutine can be out of work the very next moment, we will need to park it.
100 // Also, it would destroy locality of computation as we want to preserve
101 // dependent goroutines on the same thread; and introduce additional latency.
102 // 3. Unpark an additional thread whenever we ready a goroutine and there is an
103 // idle P, but don't do handoff. This would lead to excessive thread parking/
104 // unparking as the additional threads will instantly park without discovering
105 // any work to do.
107 // The current approach:
108 // We unpark an additional thread when we ready a goroutine if (1) there is an
109 // idle P and there are no "spinning" worker threads. A worker thread is considered
110 // spinning if it is out of local work and did not find work in global run queue/
111 // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
112 // Threads unparked this way are also considered spinning; we don't do goroutine
113 // handoff so such threads are out of work initially. Spinning threads do some
114 // spinning looking for work in per-P run queues before parking. If a spinning
115 // thread finds work it takes itself out of the spinning state and proceeds to
116 // execution. If it does not find work it takes itself out of the spinning state
117 // and then parks.
118 // If there is at least one spinning thread (sched.nmspinning>1), we don't unpark
119 // new threads when readying goroutines. To compensate for that, if the last spinning
120 // thread finds work and stops spinning, it must unpark a new spinning thread.
121 // This approach smooths out unjustified spikes of thread unparking,
122 // but at the same time guarantees eventual maximal CPU parallelism utilization.
124 // The main implementation complication is that we need to be very careful during
125 // spinning->non-spinning thread transition. This transition can race with submission
126 // of a new goroutine, and either one part or another needs to unpark another worker
127 // thread. If they both fail to do that, we can end up with semi-persistent CPU
128 // underutilization. The general pattern for goroutine readying is: submit a goroutine
129 // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
130 // The general pattern for spinning->non-spinning transition is: decrement nmspinning,
131 // #StoreLoad-style memory barrier, check all per-P work queues for new work.
132 // Note that all this complexity does not apply to global run queue as we are not
133 // sloppy about thread unparking when submitting to global queue. Also see comments
134 // for nmspinning manipulation.
136 var (
137 m0 m
138 g0 g
141 // main_init_done is a signal used by cgocallbackg that initialization
142 // has been completed. It is made before _cgo_notify_runtime_init_done,
143 // so all cgo calls can rely on it existing. When main_init is complete,
144 // it is closed, meaning cgocallbackg can reliably receive from it.
145 var main_init_done chan bool
147 // mainStarted indicates that the main M has started.
148 var mainStarted bool
150 // runtimeInitTime is the nanotime() at which the runtime started.
151 var runtimeInitTime int64
153 // Value to use for signal mask for newly created M's.
154 var initSigmask sigset
156 // The main goroutine.
157 func main() {
158 g := getg()
160 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
161 // Using decimal instead of binary GB and MB because
162 // they look nicer in the stack overflow failure message.
163 if sys.PtrSize == 8 {
164 maxstacksize = 1000000000
165 } else {
166 maxstacksize = 250000000
169 // Allow newproc to start new Ms.
170 mainStarted = true
172 systemstack(func() {
173 newm(sysmon, nil)
176 // Lock the main goroutine onto this, the main OS thread,
177 // during initialization. Most programs won't care, but a few
178 // do require certain calls to be made by the main thread.
179 // Those can arrange for main.main to run in the main thread
180 // by calling runtime.LockOSThread during initialization
181 // to preserve the lock.
182 lockOSThread()
184 if g.m != &m0 {
185 throw("runtime.main not on m0")
188 // Defer unlock so that runtime.Goexit during init does the unlock too.
189 needUnlock := true
190 defer func() {
191 if needUnlock {
192 unlockOSThread()
196 // Record when the world started. Must be after runtime_init
197 // because nanotime on some platforms depends on startNano.
198 runtimeInitTime = nanotime()
200 main_init_done = make(chan bool)
201 if iscgo {
202 // Start the template thread in case we enter Go from
203 // a C-created thread and need to create a new thread.
204 startTemplateThread()
205 _cgo_notify_runtime_init_done()
208 fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
209 fn()
210 close(main_init_done)
212 needUnlock = false
213 unlockOSThread()
215 // For gccgo we have to wait until after main is initialized
216 // to enable GC, because initializing main registers the GC roots.
217 gcenable()
219 if isarchive || islibrary {
220 // A program compiled with -buildmode=c-archive or c-shared
221 // has a main, but it is not executed.
222 return
224 fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
225 fn()
226 if raceenabled {
227 racefini()
230 // Make racy client program work: if panicking on
231 // another goroutine at the same time as main returns,
232 // let the other goroutine finish printing the panic trace.
233 // Once it does, it will exit. See issues 3934 and 20018.
234 if atomic.Load(&runningPanicDefers) != 0 {
235 // Running deferred functions should not take long.
236 for c := 0; c < 1000; c++ {
237 if atomic.Load(&runningPanicDefers) == 0 {
238 break
240 Gosched()
243 if atomic.Load(&panicking) != 0 {
244 gopark(nil, nil, "panicwait", traceEvGoStop, 1)
247 exit(0)
248 for {
249 var x *int32
250 *x = 0
254 // os_beforeExit is called from os.Exit(0).
255 //go:linkname os_beforeExit os.runtime_beforeExit
256 func os_beforeExit() {
257 if raceenabled {
258 racefini()
262 // start forcegc helper goroutine
263 func init() {
264 expectSystemGoroutine()
265 go forcegchelper()
268 func forcegchelper() {
269 setSystemGoroutine()
271 forcegc.g = getg()
272 for {
273 lock(&forcegc.lock)
274 if forcegc.idle != 0 {
275 throw("forcegc: phase error")
277 atomic.Store(&forcegc.idle, 1)
278 goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
279 // this goroutine is explicitly resumed by sysmon
280 if debug.gctrace > 0 {
281 println("GC forced")
283 // Time-triggered, fully concurrent.
284 gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()})
288 //go:nosplit
290 // Gosched yields the processor, allowing other goroutines to run. It does not
291 // suspend the current goroutine, so execution resumes automatically.
292 func Gosched() {
293 mcall(gosched_m)
296 // goschedguarded yields the processor like gosched, but also checks
297 // for forbidden states and opts out of the yield in those cases.
298 //go:nosplit
299 func goschedguarded() {
300 mcall(goschedguarded_m)
303 // Puts the current goroutine into a waiting state and calls unlockf.
304 // If unlockf returns false, the goroutine is resumed.
305 // unlockf must not access this G's stack, as it may be moved between
306 // the call to gopark and the call to unlockf.
307 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
308 mp := acquirem()
309 gp := mp.curg
310 status := readgstatus(gp)
311 if status != _Grunning && status != _Gscanrunning {
312 throw("gopark: bad g status")
314 mp.waitlock = lock
315 mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
316 gp.waitreason = reason
317 mp.waittraceev = traceEv
318 mp.waittraceskip = traceskip
319 releasem(mp)
320 // can't do anything that might move the G between Ms here.
321 mcall(park_m)
324 // Puts the current goroutine into a waiting state and unlocks the lock.
325 // The goroutine can be made runnable again by calling goready(gp).
326 func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
327 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
330 func goready(gp *g, traceskip int) {
331 systemstack(func() {
332 ready(gp, traceskip, true)
336 //go:nosplit
337 func acquireSudog() *sudog {
338 // Delicate dance: the semaphore implementation calls
339 // acquireSudog, acquireSudog calls new(sudog),
340 // new calls malloc, malloc can call the garbage collector,
341 // and the garbage collector calls the semaphore implementation
342 // in stopTheWorld.
343 // Break the cycle by doing acquirem/releasem around new(sudog).
344 // The acquirem/releasem increments m.locks during new(sudog),
345 // which keeps the garbage collector from being invoked.
346 mp := acquirem()
347 pp := mp.p.ptr()
348 if len(pp.sudogcache) == 0 {
349 lock(&sched.sudoglock)
350 // First, try to grab a batch from central cache.
351 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
352 s := sched.sudogcache
353 sched.sudogcache = s.next
354 s.next = nil
355 pp.sudogcache = append(pp.sudogcache, s)
357 unlock(&sched.sudoglock)
358 // If the central cache is empty, allocate a new one.
359 if len(pp.sudogcache) == 0 {
360 pp.sudogcache = append(pp.sudogcache, new(sudog))
363 n := len(pp.sudogcache)
364 s := pp.sudogcache[n-1]
365 pp.sudogcache[n-1] = nil
366 pp.sudogcache = pp.sudogcache[:n-1]
367 if s.elem != nil {
368 throw("acquireSudog: found s.elem != nil in cache")
370 releasem(mp)
371 return s
374 //go:nosplit
375 func releaseSudog(s *sudog) {
376 if s.elem != nil {
377 throw("runtime: sudog with non-nil elem")
379 if s.isSelect {
380 throw("runtime: sudog with non-false isSelect")
382 if s.next != nil {
383 throw("runtime: sudog with non-nil next")
385 if s.prev != nil {
386 throw("runtime: sudog with non-nil prev")
388 if s.waitlink != nil {
389 throw("runtime: sudog with non-nil waitlink")
391 if s.c != nil {
392 throw("runtime: sudog with non-nil c")
394 gp := getg()
395 if gp.param != nil {
396 throw("runtime: releaseSudog with non-nil gp.param")
398 mp := acquirem() // avoid rescheduling to another P
399 pp := mp.p.ptr()
400 if len(pp.sudogcache) == cap(pp.sudogcache) {
401 // Transfer half of local cache to the central cache.
402 var first, last *sudog
403 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
404 n := len(pp.sudogcache)
405 p := pp.sudogcache[n-1]
406 pp.sudogcache[n-1] = nil
407 pp.sudogcache = pp.sudogcache[:n-1]
408 if first == nil {
409 first = p
410 } else {
411 last.next = p
413 last = p
415 lock(&sched.sudoglock)
416 last.next = sched.sudogcache
417 sched.sudogcache = first
418 unlock(&sched.sudoglock)
420 pp.sudogcache = append(pp.sudogcache, s)
421 releasem(mp)
424 // funcPC returns the entry PC of the function f.
425 // It assumes that f is a func value. Otherwise the behavior is undefined.
426 // For gccgo note that this differs from the gc implementation; the gc
427 // implementation adds sys.PtrSize to the address of the interface
428 // value, but GCC's alias analysis decides that that can not be a
429 // reference to the second field of the interface, and in some cases
430 // it drops the initialization of the second field as a dead store.
431 //go:nosplit
432 func funcPC(f interface{}) uintptr {
433 i := (*iface)(unsafe.Pointer(&f))
434 return **(**uintptr)(i.data)
437 func lockedOSThread() bool {
438 gp := getg()
439 return gp.lockedm != 0 && gp.m.lockedg != 0
442 var (
443 allgs []*g
444 allglock mutex
447 func allgadd(gp *g) {
448 if readgstatus(gp) == _Gidle {
449 throw("allgadd: bad status Gidle")
452 lock(&allglock)
453 allgs = append(allgs, gp)
454 allglen = uintptr(len(allgs))
455 unlock(&allglock)
458 const (
459 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
460 // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
461 _GoidCacheBatch = 16
464 // The bootstrap sequence is:
466 // call osinit
467 // call schedinit
468 // make & queue new G
469 // call runtime·mstart
471 // The new G calls runtime·main.
472 func schedinit() {
473 _m_ := &m0
474 _g_ := &g0
475 _m_.g0 = _g_
476 _m_.curg = _g_
477 _g_.m = _m_
478 setg(_g_)
480 sched.maxmcount = 10000
482 mallocinit()
483 mcommoninit(_g_.m)
484 alginit() // maps must not be used before this call
486 msigsave(_g_.m)
487 initSigmask = _g_.m.sigmask
489 goargs()
490 goenvs()
491 parsedebugvars()
492 gcinit()
494 sched.lastpoll = uint64(nanotime())
495 procs := ncpu
496 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
497 procs = n
499 if procresize(procs) != nil {
500 throw("unknown runnable goroutine during bootstrap")
503 // For cgocheck > 1, we turn on the write barrier at all times
504 // and check all pointer writes. We can't do this until after
505 // procresize because the write barrier needs a P.
506 if debug.cgocheck > 1 {
507 writeBarrier.cgo = true
508 writeBarrier.enabled = true
509 for _, p := range allp {
510 p.wbBuf.reset()
514 if buildVersion == "" {
515 // Condition should never trigger. This code just serves
516 // to ensure runtime·buildVersion is kept in the resulting binary.
517 buildVersion = "unknown"
521 func dumpgstatus(gp *g) {
522 _g_ := getg()
523 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
524 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
527 func checkmcount() {
528 // sched lock is held
529 if mcount() > sched.maxmcount {
530 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
531 throw("thread exhaustion")
535 func mcommoninit(mp *m) {
536 _g_ := getg()
538 // g0 stack won't make sense for user (and is not necessary unwindable).
539 if _g_ != _g_.m.g0 {
540 callers(1, mp.createstack[:])
543 lock(&sched.lock)
544 if sched.mnext+1 < sched.mnext {
545 throw("runtime: thread ID overflow")
547 mp.id = sched.mnext
548 sched.mnext++
549 checkmcount()
551 mp.fastrand[0] = 1597334677 * uint32(mp.id)
552 mp.fastrand[1] = uint32(cputicks())
553 if mp.fastrand[0]|mp.fastrand[1] == 0 {
554 mp.fastrand[1] = 1
557 mpreinit(mp)
559 // Add to allm so garbage collector doesn't free g->m
560 // when it is just in a register or thread-local storage.
561 mp.alllink = allm
563 // NumCgoCall() iterates over allm w/o schedlock,
564 // so we need to publish it safely.
565 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
566 unlock(&sched.lock)
569 // Mark gp ready to run.
570 func ready(gp *g, traceskip int, next bool) {
571 if trace.enabled {
572 traceGoUnpark(gp, traceskip)
575 status := readgstatus(gp)
577 // Mark runnable.
578 _g_ := getg()
579 _g_.m.locks++ // disable preemption because it can be holding p in a local var
580 if status&^_Gscan != _Gwaiting {
581 dumpgstatus(gp)
582 throw("bad g->status in ready")
585 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
586 casgstatus(gp, _Gwaiting, _Grunnable)
587 runqput(_g_.m.p.ptr(), gp, next)
588 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
589 wakep()
591 _g_.m.locks--
594 func gcprocs() int32 {
595 // Figure out how many CPUs to use during GC.
596 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
597 lock(&sched.lock)
598 n := gomaxprocs
599 if n > ncpu {
600 n = ncpu
602 if n > _MaxGcproc {
603 n = _MaxGcproc
605 if n > sched.nmidle+1 { // one M is currently running
606 n = sched.nmidle + 1
608 unlock(&sched.lock)
609 return n
612 func needaddgcproc() bool {
613 lock(&sched.lock)
614 n := gomaxprocs
615 if n > ncpu {
616 n = ncpu
618 if n > _MaxGcproc {
619 n = _MaxGcproc
621 n -= sched.nmidle + 1 // one M is currently running
622 unlock(&sched.lock)
623 return n > 0
626 func helpgc(nproc int32) {
627 _g_ := getg()
628 lock(&sched.lock)
629 pos := 0
630 for n := int32(1); n < nproc; n++ { // one M is currently running
631 if allp[pos].mcache == _g_.m.mcache {
632 pos++
634 mp := mget()
635 if mp == nil {
636 throw("gcprocs inconsistency")
638 mp.helpgc = n
639 mp.p.set(allp[pos])
640 mp.mcache = allp[pos].mcache
641 pos++
642 notewakeup(&mp.park)
644 unlock(&sched.lock)
647 // freezeStopWait is a large value that freezetheworld sets
648 // sched.stopwait to in order to request that all Gs permanently stop.
649 const freezeStopWait = 0x7fffffff
651 // freezing is set to non-zero if the runtime is trying to freeze the
652 // world.
653 var freezing uint32
655 // Similar to stopTheWorld but best-effort and can be called several times.
656 // There is no reverse operation, used during crashing.
657 // This function must not lock any mutexes.
658 func freezetheworld() {
659 atomic.Store(&freezing, 1)
660 // stopwait and preemption requests can be lost
661 // due to races with concurrently executing threads,
662 // so try several times
663 for i := 0; i < 5; i++ {
664 // this should tell the scheduler to not start any new goroutines
665 sched.stopwait = freezeStopWait
666 atomic.Store(&sched.gcwaiting, 1)
667 // this should stop running goroutines
668 if !preemptall() {
669 break // no running goroutines
671 usleep(1000)
673 // to be sure
674 usleep(1000)
675 preemptall()
676 usleep(1000)
679 func isscanstatus(status uint32) bool {
680 if status == _Gscan {
681 throw("isscanstatus: Bad status Gscan")
683 return status&_Gscan == _Gscan
686 // All reads and writes of g's status go through readgstatus, casgstatus
687 // castogscanstatus, casfrom_Gscanstatus.
688 //go:nosplit
689 func readgstatus(gp *g) uint32 {
690 return atomic.Load(&gp.atomicstatus)
693 // Ownership of gcscanvalid:
695 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
696 // then gp owns gp.gcscanvalid, and other goroutines must not modify it.
698 // Otherwise, a second goroutine can lock the scan state by setting _Gscan
699 // in the status bit and then modify gcscanvalid, and then unlock the scan state.
701 // Note that the first condition implies an exception to the second:
702 // if a second goroutine changes gp's status to _Grunning|_Gscan,
703 // that second goroutine still does not have the right to modify gcscanvalid.
705 // The Gscanstatuses are acting like locks and this releases them.
706 // If it proves to be a performance hit we should be able to make these
707 // simple atomic stores but for now we are going to throw if
708 // we see an inconsistent state.
709 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
710 success := false
712 // Check that transition is valid.
713 switch oldval {
714 default:
715 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
716 dumpgstatus(gp)
717 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
718 case _Gscanrunnable,
719 _Gscanwaiting,
720 _Gscanrunning,
721 _Gscansyscall:
722 if newval == oldval&^_Gscan {
723 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
726 if !success {
727 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
728 dumpgstatus(gp)
729 throw("casfrom_Gscanstatus: gp->status is not in scan state")
733 // This will return false if the gp is not in the expected status and the cas fails.
734 // This acts like a lock acquire while the casfromgstatus acts like a lock release.
735 func castogscanstatus(gp *g, oldval, newval uint32) bool {
736 switch oldval {
737 case _Grunnable,
738 _Grunning,
739 _Gwaiting,
740 _Gsyscall:
741 if newval == oldval|_Gscan {
742 return atomic.Cas(&gp.atomicstatus, oldval, newval)
745 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
746 throw("castogscanstatus")
747 panic("not reached")
750 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
751 // and casfrom_Gscanstatus instead.
752 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
753 // put it in the Gscan state is finished.
754 //go:nosplit
755 func casgstatus(gp *g, oldval, newval uint32) {
756 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
757 systemstack(func() {
758 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
759 throw("casgstatus: bad incoming values")
763 if oldval == _Grunning && gp.gcscanvalid {
764 // If oldvall == _Grunning, then the actual status must be
765 // _Grunning or _Grunning|_Gscan; either way,
766 // we own gp.gcscanvalid, so it's safe to read.
767 // gp.gcscanvalid must not be true when we are running.
768 systemstack(func() {
769 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
770 throw("casgstatus")
774 // See http://golang.org/cl/21503 for justification of the yield delay.
775 const yieldDelay = 5 * 1000
776 var nextYield int64
778 // loop if gp->atomicstatus is in a scan state giving
779 // GC time to finish and change the state to oldval.
780 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
781 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
782 systemstack(func() {
783 throw("casgstatus: waiting for Gwaiting but is Grunnable")
786 // Help GC if needed.
787 // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
788 // gp.preemptscan = false
789 // systemstack(func() {
790 // gcphasework(gp)
791 // })
792 // }
793 // But meanwhile just yield.
794 if i == 0 {
795 nextYield = nanotime() + yieldDelay
797 if nanotime() < nextYield {
798 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
799 procyield(1)
801 } else {
802 osyield()
803 nextYield = nanotime() + yieldDelay/2
806 if newval == _Grunning {
807 gp.gcscanvalid = false
811 // scang blocks until gp's stack has been scanned.
812 // It might be scanned by scang or it might be scanned by the goroutine itself.
813 // Either way, the stack scan has completed when scang returns.
814 func scang(gp *g, gcw *gcWork) {
815 // Invariant; we (the caller, markroot for a specific goroutine) own gp.gcscandone.
816 // Nothing is racing with us now, but gcscandone might be set to true left over
817 // from an earlier round of stack scanning (we scan twice per GC).
818 // We use gcscandone to record whether the scan has been done during this round.
820 gp.gcscandone = false
822 // See http://golang.org/cl/21503 for justification of the yield delay.
823 const yieldDelay = 10 * 1000
824 var nextYield int64
826 // Endeavor to get gcscandone set to true,
827 // either by doing the stack scan ourselves or by coercing gp to scan itself.
828 // gp.gcscandone can transition from false to true when we're not looking
829 // (if we asked for preemption), so any time we lock the status using
830 // castogscanstatus we have to double-check that the scan is still not done.
831 loop:
832 for i := 0; !gp.gcscandone; i++ {
833 switch s := readgstatus(gp); s {
834 default:
835 dumpgstatus(gp)
836 throw("stopg: invalid status")
838 case _Gdead:
839 // No stack.
840 gp.gcscandone = true
841 break loop
843 case _Gcopystack:
844 // Stack being switched. Go around again.
846 case _Grunnable, _Gsyscall, _Gwaiting:
847 // Claim goroutine by setting scan bit.
848 // Racing with execution or readying of gp.
849 // The scan bit keeps them from running
850 // the goroutine until we're done.
851 if castogscanstatus(gp, s, s|_Gscan) {
852 if gp.scanningself {
853 // Don't try to scan the stack
854 // if the goroutine is going to do
855 // it itself.
856 restartg(gp)
857 break
859 if !gp.gcscandone {
860 scanstack(gp, gcw)
861 gp.gcscandone = true
863 restartg(gp)
864 break loop
867 case _Gscanwaiting:
868 // newstack is doing a scan for us right now. Wait.
870 case _Gscanrunning:
871 // checkPreempt is scanning. Wait.
873 case _Grunning:
874 // Goroutine running. Try to preempt execution so it can scan itself.
875 // The preemption handler (in newstack) does the actual scan.
877 // Optimization: if there is already a pending preemption request
878 // (from the previous loop iteration), don't bother with the atomics.
879 if gp.preemptscan && gp.preempt {
880 break
883 // Ask for preemption and self scan.
884 if castogscanstatus(gp, _Grunning, _Gscanrunning) {
885 if !gp.gcscandone {
886 gp.preemptscan = true
887 gp.preempt = true
889 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
893 if i == 0 {
894 nextYield = nanotime() + yieldDelay
896 if nanotime() < nextYield {
897 procyield(10)
898 } else {
899 osyield()
900 nextYield = nanotime() + yieldDelay/2
904 gp.preemptscan = false // cancel scan request if no longer needed
907 // The GC requests that this routine be moved from a scanmumble state to a mumble state.
908 func restartg(gp *g) {
909 s := readgstatus(gp)
910 switch s {
911 default:
912 dumpgstatus(gp)
913 throw("restartg: unexpected status")
915 case _Gdead:
916 // ok
918 case _Gscanrunnable,
919 _Gscanwaiting,
920 _Gscansyscall:
921 casfrom_Gscanstatus(gp, s, s&^_Gscan)
925 // stopTheWorld stops all P's from executing goroutines, interrupting
926 // all goroutines at GC safe points and records reason as the reason
927 // for the stop. On return, only the current goroutine's P is running.
928 // stopTheWorld must not be called from a system stack and the caller
929 // must not hold worldsema. The caller must call startTheWorld when
930 // other P's should resume execution.
932 // stopTheWorld is safe for multiple goroutines to call at the
933 // same time. Each will execute its own stop, and the stops will
934 // be serialized.
936 // This is also used by routines that do stack dumps. If the system is
937 // in panic or being exited, this may not reliably stop all
938 // goroutines.
939 func stopTheWorld(reason string) {
940 semacquire(&worldsema)
941 getg().m.preemptoff = reason
942 systemstack(stopTheWorldWithSema)
945 // startTheWorld undoes the effects of stopTheWorld.
946 func startTheWorld() {
947 systemstack(func() { startTheWorldWithSema(false) })
948 // worldsema must be held over startTheWorldWithSema to ensure
949 // gomaxprocs cannot change while worldsema is held.
950 semrelease(&worldsema)
951 getg().m.preemptoff = ""
954 // Holding worldsema grants an M the right to try to stop the world
955 // and prevents gomaxprocs from changing concurrently.
956 var worldsema uint32 = 1
958 // stopTheWorldWithSema is the core implementation of stopTheWorld.
959 // The caller is responsible for acquiring worldsema and disabling
960 // preemption first and then should stopTheWorldWithSema on the system
961 // stack:
963 // semacquire(&worldsema, 0)
964 // m.preemptoff = "reason"
965 // systemstack(stopTheWorldWithSema)
967 // When finished, the caller must either call startTheWorld or undo
968 // these three operations separately:
970 // m.preemptoff = ""
971 // systemstack(startTheWorldWithSema)
972 // semrelease(&worldsema)
974 // It is allowed to acquire worldsema once and then execute multiple
975 // startTheWorldWithSema/stopTheWorldWithSema pairs.
976 // Other P's are able to execute between successive calls to
977 // startTheWorldWithSema and stopTheWorldWithSema.
978 // Holding worldsema causes any other goroutines invoking
979 // stopTheWorld to block.
980 func stopTheWorldWithSema() {
981 _g_ := getg()
983 // If we hold a lock, then we won't be able to stop another M
984 // that is blocked trying to acquire the lock.
985 if _g_.m.locks > 0 {
986 throw("stopTheWorld: holding locks")
989 lock(&sched.lock)
990 sched.stopwait = gomaxprocs
991 atomic.Store(&sched.gcwaiting, 1)
992 preemptall()
993 // stop current P
994 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
995 sched.stopwait--
996 // try to retake all P's in Psyscall status
997 for _, p := range allp {
998 s := p.status
999 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
1000 if trace.enabled {
1001 traceGoSysBlock(p)
1002 traceProcStop(p)
1004 p.syscalltick++
1005 sched.stopwait--
1008 // stop idle P's
1009 for {
1010 p := pidleget()
1011 if p == nil {
1012 break
1014 p.status = _Pgcstop
1015 sched.stopwait--
1017 wait := sched.stopwait > 0
1018 unlock(&sched.lock)
1020 // wait for remaining P's to stop voluntarily
1021 if wait {
1022 for {
1023 // wait for 100us, then try to re-preempt in case of any races
1024 if notetsleep(&sched.stopnote, 100*1000) {
1025 noteclear(&sched.stopnote)
1026 break
1028 preemptall()
1032 // sanity checks
1033 bad := ""
1034 if sched.stopwait != 0 {
1035 bad = "stopTheWorld: not stopped (stopwait != 0)"
1036 } else {
1037 for _, p := range allp {
1038 if p.status != _Pgcstop {
1039 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1043 if atomic.Load(&freezing) != 0 {
1044 // Some other thread is panicking. This can cause the
1045 // sanity checks above to fail if the panic happens in
1046 // the signal handler on a stopped thread. Either way,
1047 // we should halt this thread.
1048 lock(&deadlock)
1049 lock(&deadlock)
1051 if bad != "" {
1052 throw(bad)
1056 func mhelpgc() {
1057 _g_ := getg()
1058 _g_.m.helpgc = -1
1061 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1062 _g_ := getg()
1064 _g_.m.locks++ // disable preemption because it can be holding p in a local var
1065 if netpollinited() {
1066 gp := netpoll(false) // non-blocking
1067 injectglist(gp)
1069 add := needaddgcproc()
1070 lock(&sched.lock)
1072 procs := gomaxprocs
1073 if newprocs != 0 {
1074 procs = newprocs
1075 newprocs = 0
1077 p1 := procresize(procs)
1078 sched.gcwaiting = 0
1079 if sched.sysmonwait != 0 {
1080 sched.sysmonwait = 0
1081 notewakeup(&sched.sysmonnote)
1083 unlock(&sched.lock)
1085 for p1 != nil {
1086 p := p1
1087 p1 = p1.link.ptr()
1088 if p.m != 0 {
1089 mp := p.m.ptr()
1090 p.m = 0
1091 if mp.nextp != 0 {
1092 throw("startTheWorld: inconsistent mp->nextp")
1094 mp.nextp.set(p)
1095 notewakeup(&mp.park)
1096 } else {
1097 // Start M to run P. Do not start another M below.
1098 newm(nil, p)
1099 add = false
1103 // Capture start-the-world time before doing clean-up tasks.
1104 startTime := nanotime()
1105 if emitTraceEvent {
1106 traceGCSTWDone()
1109 // Wakeup an additional proc in case we have excessive runnable goroutines
1110 // in local queues or in the global queue. If we don't, the proc will park itself.
1111 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
1112 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
1113 wakep()
1116 if add {
1117 // If GC could have used another helper proc, start one now,
1118 // in the hope that it will be available next time.
1119 // It would have been even better to start it before the collection,
1120 // but doing so requires allocating memory, so it's tricky to
1121 // coordinate. This lazy approach works out in practice:
1122 // we don't mind if the first couple gc rounds don't have quite
1123 // the maximum number of procs.
1124 newm(mhelpgc, nil)
1126 _g_.m.locks--
1128 return startTime
1131 // First function run by a new goroutine.
1132 // This is passed to makecontext.
1133 func kickoff() {
1134 gp := getg()
1136 if gp.traceback != nil {
1137 gtraceback(gp)
1140 fv := gp.entry
1141 param := gp.param
1142 gp.entry = nil
1144 // When running on the g0 stack we can wind up here without a p,
1145 // for example from mcall(exitsyscall0) in exitsyscall.
1146 // Setting gp.param = nil will call a write barrier, and if
1147 // there is no p that write barrier will crash. When called from
1148 // mcall the gp.param value will be a *g, which we don't need to
1149 // shade since we know it will be kept alive elsewhere. In that
1150 // case clear the field using uintptr so that the write barrier
1151 // does nothing.
1152 if gp.m.p == 0 {
1153 if gp == gp.m.g0 && gp.param == unsafe.Pointer(gp.m.curg) {
1154 *(*uintptr)(unsafe.Pointer(&gp.param)) = 0
1155 } else {
1156 throw("no p in kickoff")
1159 gp.param = nil
1161 fv(param)
1162 goexit1()
1165 func mstart1(dummy int32) {
1166 _g_ := getg()
1168 if _g_ != _g_.m.g0 {
1169 throw("bad runtime·mstart")
1172 asminit()
1174 // Install signal handlers; after minit so that minit can
1175 // prepare the thread to be able to handle the signals.
1176 // For gccgo minit was called by C code.
1177 if _g_.m == &m0 {
1178 mstartm0()
1181 if fn := _g_.m.mstartfn; fn != nil {
1182 fn()
1185 if _g_.m.helpgc != 0 {
1186 _g_.m.helpgc = 0
1187 stopm()
1188 } else if _g_.m != &m0 {
1189 acquirep(_g_.m.nextp.ptr())
1190 _g_.m.nextp = 0
1192 schedule()
1195 // mstartm0 implements part of mstart1 that only runs on the m0.
1197 // Write barriers are allowed here because we know the GC can't be
1198 // running yet, so they'll be no-ops.
1200 //go:yeswritebarrierrec
1201 func mstartm0() {
1202 // Create an extra M for callbacks on threads not created by Go.
1203 if iscgo && !cgoHasExtraM {
1204 cgoHasExtraM = true
1205 newextram()
1207 initsig(false)
1210 // mexit tears down and exits the current thread.
1212 // Don't call this directly to exit the thread, since it must run at
1213 // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
1214 // unwind the stack to the point that exits the thread.
1216 // It is entered with m.p != nil, so write barriers are allowed. It
1217 // will release the P before exiting.
1219 //go:yeswritebarrierrec
1220 func mexit(osStack bool) {
1221 g := getg()
1222 m := g.m
1224 if m == &m0 {
1225 // This is the main thread. Just wedge it.
1227 // On Linux, exiting the main thread puts the process
1228 // into a non-waitable zombie state. On Plan 9,
1229 // exiting the main thread unblocks wait even though
1230 // other threads are still running. On Solaris we can
1231 // neither exitThread nor return from mstart. Other
1232 // bad things probably happen on other platforms.
1234 // We could try to clean up this M more before wedging
1235 // it, but that complicates signal handling.
1236 handoffp(releasep())
1237 lock(&sched.lock)
1238 sched.nmfreed++
1239 checkdead()
1240 unlock(&sched.lock)
1241 notesleep(&m.park)
1242 throw("locked m0 woke up")
1245 sigblock()
1246 unminit()
1248 // Free the gsignal stack.
1249 if m.gsignal != nil {
1250 stackfree(m.gsignal)
1253 // Remove m from allm.
1254 lock(&sched.lock)
1255 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1256 if *pprev == m {
1257 *pprev = m.alllink
1258 goto found
1261 throw("m not found in allm")
1262 found:
1263 if !osStack {
1264 // Delay reaping m until it's done with the stack.
1266 // If this is using an OS stack, the OS will free it
1267 // so there's no need for reaping.
1268 atomic.Store(&m.freeWait, 1)
1269 // Put m on the free list, though it will not be reaped until
1270 // freeWait is 0. Note that the free list must not be linked
1271 // through alllink because some functions walk allm without
1272 // locking, so may be using alllink.
1273 m.freelink = sched.freem
1274 sched.freem = m
1276 unlock(&sched.lock)
1278 // Release the P.
1279 handoffp(releasep())
1280 // After this point we must not have write barriers.
1282 // Invoke the deadlock detector. This must happen after
1283 // handoffp because it may have started a new M to take our
1284 // P's work.
1285 lock(&sched.lock)
1286 sched.nmfreed++
1287 checkdead()
1288 unlock(&sched.lock)
1290 if osStack {
1291 // Return from mstart and let the system thread
1292 // library free the g0 stack and terminate the thread.
1293 return
1296 // mstart is the thread's entry point, so there's nothing to
1297 // return to. Exit the thread directly. exitThread will clear
1298 // m.freeWait when it's done with the stack and the m can be
1299 // reaped.
1300 exitThread(&m.freeWait)
1303 // forEachP calls fn(p) for every P p when p reaches a GC safe point.
1304 // If a P is currently executing code, this will bring the P to a GC
1305 // safe point and execute fn on that P. If the P is not executing code
1306 // (it is idle or in a syscall), this will call fn(p) directly while
1307 // preventing the P from exiting its state. This does not ensure that
1308 // fn will run on every CPU executing Go code, but it acts as a global
1309 // memory barrier. GC uses this as a "ragged barrier."
1311 // The caller must hold worldsema.
1313 //go:systemstack
1314 func forEachP(fn func(*p)) {
1315 mp := acquirem()
1316 _p_ := getg().m.p.ptr()
1318 lock(&sched.lock)
1319 if sched.safePointWait != 0 {
1320 throw("forEachP: sched.safePointWait != 0")
1322 sched.safePointWait = gomaxprocs - 1
1323 sched.safePointFn = fn
1325 // Ask all Ps to run the safe point function.
1326 for _, p := range allp {
1327 if p != _p_ {
1328 atomic.Store(&p.runSafePointFn, 1)
1331 preemptall()
1333 // Any P entering _Pidle or _Psyscall from now on will observe
1334 // p.runSafePointFn == 1 and will call runSafePointFn when
1335 // changing its status to _Pidle/_Psyscall.
1337 // Run safe point function for all idle Ps. sched.pidle will
1338 // not change because we hold sched.lock.
1339 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1340 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1341 fn(p)
1342 sched.safePointWait--
1346 wait := sched.safePointWait > 0
1347 unlock(&sched.lock)
1349 // Run fn for the current P.
1350 fn(_p_)
1352 // Force Ps currently in _Psyscall into _Pidle and hand them
1353 // off to induce safe point function execution.
1354 for _, p := range allp {
1355 s := p.status
1356 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1357 if trace.enabled {
1358 traceGoSysBlock(p)
1359 traceProcStop(p)
1361 p.syscalltick++
1362 handoffp(p)
1366 // Wait for remaining Ps to run fn.
1367 if wait {
1368 for {
1369 // Wait for 100us, then try to re-preempt in
1370 // case of any races.
1372 // Requires system stack.
1373 if notetsleep(&sched.safePointNote, 100*1000) {
1374 noteclear(&sched.safePointNote)
1375 break
1377 preemptall()
1380 if sched.safePointWait != 0 {
1381 throw("forEachP: not done")
1383 for _, p := range allp {
1384 if p.runSafePointFn != 0 {
1385 throw("forEachP: P did not run fn")
1389 lock(&sched.lock)
1390 sched.safePointFn = nil
1391 unlock(&sched.lock)
1392 releasem(mp)
1395 // runSafePointFn runs the safe point function, if any, for this P.
1396 // This should be called like
1398 // if getg().m.p.runSafePointFn != 0 {
1399 // runSafePointFn()
1400 // }
1402 // runSafePointFn must be checked on any transition in to _Pidle or
1403 // _Psyscall to avoid a race where forEachP sees that the P is running
1404 // just before the P goes into _Pidle/_Psyscall and neither forEachP
1405 // nor the P run the safe-point function.
1406 func runSafePointFn() {
1407 p := getg().m.p.ptr()
1408 // Resolve the race between forEachP running the safe-point
1409 // function on this P's behalf and this P running the
1410 // safe-point function directly.
1411 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1412 return
1414 sched.safePointFn(p)
1415 lock(&sched.lock)
1416 sched.safePointWait--
1417 if sched.safePointWait == 0 {
1418 notewakeup(&sched.safePointNote)
1420 unlock(&sched.lock)
1423 // Allocate a new m unassociated with any thread.
1424 // Can use p for allocation context if needed.
1425 // fn is recorded as the new m's m.mstartfn.
1427 // This function is allowed to have write barriers even if the caller
1428 // isn't because it borrows _p_.
1430 //go:yeswritebarrierrec
1431 func allocm(_p_ *p, fn func(), allocatestack bool) (mp *m, g0Stack unsafe.Pointer, g0StackSize uintptr) {
1432 _g_ := getg()
1433 _g_.m.locks++ // disable GC because it can be called from sysmon
1434 if _g_.m.p == 0 {
1435 acquirep(_p_) // temporarily borrow p for mallocs in this function
1438 // Release the free M list. We need to do this somewhere and
1439 // this may free up a stack we can use.
1440 if sched.freem != nil {
1441 lock(&sched.lock)
1442 var newList *m
1443 for freem := sched.freem; freem != nil; {
1444 if freem.freeWait != 0 {
1445 next := freem.freelink
1446 freem.freelink = newList
1447 newList = freem
1448 freem = next
1449 continue
1451 stackfree(freem.g0)
1452 freem = freem.freelink
1454 sched.freem = newList
1455 unlock(&sched.lock)
1458 mp = new(m)
1459 mp.mstartfn = fn
1460 mcommoninit(mp)
1462 mp.g0 = malg(allocatestack, false, &g0Stack, &g0StackSize)
1463 mp.g0.m = mp
1465 if _p_ == _g_.m.p.ptr() {
1466 releasep()
1468 _g_.m.locks--
1470 return mp, g0Stack, g0StackSize
1473 // needm is called when a cgo callback happens on a
1474 // thread without an m (a thread not created by Go).
1475 // In this case, needm is expected to find an m to use
1476 // and return with m, g initialized correctly.
1477 // Since m and g are not set now (likely nil, but see below)
1478 // needm is limited in what routines it can call. In particular
1479 // it can only call nosplit functions (textflag 7) and cannot
1480 // do any scheduling that requires an m.
1482 // In order to avoid needing heavy lifting here, we adopt
1483 // the following strategy: there is a stack of available m's
1484 // that can be stolen. Using compare-and-swap
1485 // to pop from the stack has ABA races, so we simulate
1486 // a lock by doing an exchange (via casp) to steal the stack
1487 // head and replace the top pointer with MLOCKED (1).
1488 // This serves as a simple spin lock that we can use even
1489 // without an m. The thread that locks the stack in this way
1490 // unlocks the stack by storing a valid stack head pointer.
1492 // In order to make sure that there is always an m structure
1493 // available to be stolen, we maintain the invariant that there
1494 // is always one more than needed. At the beginning of the
1495 // program (if cgo is in use) the list is seeded with a single m.
1496 // If needm finds that it has taken the last m off the list, its job
1497 // is - once it has installed its own m so that it can do things like
1498 // allocate memory - to create a spare m and put it on the list.
1500 // Each of these extra m's also has a g0 and a curg that are
1501 // pressed into service as the scheduling stack and current
1502 // goroutine for the duration of the cgo callback.
1504 // When the callback is done with the m, it calls dropm to
1505 // put the m back on the list.
1506 //go:nosplit
1507 func needm(x byte) {
1508 if iscgo && !cgoHasExtraM {
1509 // Can happen if C/C++ code calls Go from a global ctor.
1510 // Can not throw, because scheduler is not initialized yet.
1511 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1512 exit(1)
1515 // Lock extra list, take head, unlock popped list.
1516 // nilokay=false is safe here because of the invariant above,
1517 // that the extra list always contains or will soon contain
1518 // at least one m.
1519 mp := lockextra(false)
1521 // Set needextram when we've just emptied the list,
1522 // so that the eventual call into cgocallbackg will
1523 // allocate a new m for the extra list. We delay the
1524 // allocation until then so that it can be done
1525 // after exitsyscall makes sure it is okay to be
1526 // running at all (that is, there's no garbage collection
1527 // running right now).
1528 mp.needextram = mp.schedlink == 0
1529 extraMCount--
1530 unlockextra(mp.schedlink.ptr())
1532 // Save and block signals before installing g.
1533 // Once g is installed, any incoming signals will try to execute,
1534 // but we won't have the sigaltstack settings and other data
1535 // set up appropriately until the end of minit, which will
1536 // unblock the signals. This is the same dance as when
1537 // starting a new m to run Go code via newosproc.
1538 msigsave(mp)
1539 sigblock()
1541 // Install g (= m->curg).
1542 setg(mp.curg)
1544 // Initialize this thread to use the m.
1545 asminit()
1546 minit()
1548 setGContext()
1550 // mp.curg is now a real goroutine.
1551 casgstatus(mp.curg, _Gdead, _Gsyscall)
1552 atomic.Xadd(&sched.ngsys, -1)
1555 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1557 // newextram allocates m's and puts them on the extra list.
1558 // It is called with a working local m, so that it can do things
1559 // like call schedlock and allocate.
1560 func newextram() {
1561 c := atomic.Xchg(&extraMWaiters, 0)
1562 if c > 0 {
1563 for i := uint32(0); i < c; i++ {
1564 oneNewExtraM()
1566 } else {
1567 // Make sure there is at least one extra M.
1568 mp := lockextra(true)
1569 unlockextra(mp)
1570 if mp == nil {
1571 oneNewExtraM()
1576 // oneNewExtraM allocates an m and puts it on the extra list.
1577 func oneNewExtraM() {
1578 // Create extra goroutine locked to extra m.
1579 // The goroutine is the context in which the cgo callback will run.
1580 // The sched.pc will never be returned to, but setting it to
1581 // goexit makes clear to the traceback routines where
1582 // the goroutine stack ends.
1583 mp, g0SP, g0SPSize := allocm(nil, nil, true)
1584 gp := malg(true, false, nil, nil)
1585 gp.gcscanvalid = true
1586 gp.gcscandone = true
1587 // malg returns status as _Gidle. Change to _Gdead before
1588 // adding to allg where GC can see it. We use _Gdead to hide
1589 // this from tracebacks and stack scans since it isn't a
1590 // "real" goroutine until needm grabs it.
1591 casgstatus(gp, _Gidle, _Gdead)
1592 gp.m = mp
1593 mp.curg = gp
1594 mp.lockedInt++
1595 mp.lockedg.set(gp)
1596 gp.lockedm.set(mp)
1597 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1598 // put on allg for garbage collector
1599 allgadd(gp)
1601 // The context for gp will be set up in needm.
1602 // Here we need to set the context for g0.
1603 makeGContext(mp.g0, g0SP, g0SPSize)
1605 // gp is now on the allg list, but we don't want it to be
1606 // counted by gcount. It would be more "proper" to increment
1607 // sched.ngfree, but that requires locking. Incrementing ngsys
1608 // has the same effect.
1609 atomic.Xadd(&sched.ngsys, +1)
1611 // Add m to the extra list.
1612 mnext := lockextra(true)
1613 mp.schedlink.set(mnext)
1614 extraMCount++
1615 unlockextra(mp)
1618 // dropm is called when a cgo callback has called needm but is now
1619 // done with the callback and returning back into the non-Go thread.
1620 // It puts the current m back onto the extra list.
1622 // The main expense here is the call to signalstack to release the
1623 // m's signal stack, and then the call to needm on the next callback
1624 // from this thread. It is tempting to try to save the m for next time,
1625 // which would eliminate both these costs, but there might not be
1626 // a next time: the current thread (which Go does not control) might exit.
1627 // If we saved the m for that thread, there would be an m leak each time
1628 // such a thread exited. Instead, we acquire and release an m on each
1629 // call. These should typically not be scheduling operations, just a few
1630 // atomics, so the cost should be small.
1632 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
1633 // variable using pthread_key_create. Unlike the pthread keys we already use
1634 // on OS X, this dummy key would never be read by Go code. It would exist
1635 // only so that we could register at thread-exit-time destructor.
1636 // That destructor would put the m back onto the extra list.
1637 // This is purely a performance optimization. The current version,
1638 // in which dropm happens on each cgo call, is still correct too.
1639 // We may have to keep the current version on systems with cgo
1640 // but without pthreads, like Windows.
1642 // CgocallBackDone calls this after releasing p, so no write barriers.
1643 //go:nowritebarrierrec
1644 func dropm() {
1645 // Clear m and g, and return m to the extra list.
1646 // After the call to setg we can only call nosplit functions
1647 // with no pointer manipulation.
1648 mp := getg().m
1650 // Return mp.curg to dead state.
1651 casgstatus(mp.curg, _Gsyscall, _Gdead)
1652 atomic.Xadd(&sched.ngsys, +1)
1654 // Block signals before unminit.
1655 // Unminit unregisters the signal handling stack (but needs g on some systems).
1656 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
1657 // It's important not to try to handle a signal between those two steps.
1658 sigmask := mp.sigmask
1659 sigblock()
1660 unminit()
1662 // gccgo sets the stack to Gdead here, because the splitstack
1663 // context is not initialized.
1664 atomic.Store(&mp.curg.atomicstatus, _Gdead)
1665 mp.curg.gcstack = 0
1666 mp.curg.gcnextsp = 0
1668 mnext := lockextra(true)
1669 extraMCount++
1670 mp.schedlink.set(mnext)
1672 setg(nil)
1674 // Commit the release of mp.
1675 unlockextra(mp)
1677 msigrestore(sigmask)
1680 // A helper function for EnsureDropM.
1681 func getm() uintptr {
1682 return uintptr(unsafe.Pointer(getg().m))
1685 var extram uintptr
1686 var extraMCount uint32 // Protected by lockextra
1687 var extraMWaiters uint32
1689 // lockextra locks the extra list and returns the list head.
1690 // The caller must unlock the list by storing a new list head
1691 // to extram. If nilokay is true, then lockextra will
1692 // return a nil list head if that's what it finds. If nilokay is false,
1693 // lockextra will keep waiting until the list head is no longer nil.
1694 //go:nosplit
1695 //go:nowritebarrierrec
1696 func lockextra(nilokay bool) *m {
1697 const locked = 1
1699 incr := false
1700 for {
1701 old := atomic.Loaduintptr(&extram)
1702 if old == locked {
1703 yield := osyield
1704 yield()
1705 continue
1707 if old == 0 && !nilokay {
1708 if !incr {
1709 // Add 1 to the number of threads
1710 // waiting for an M.
1711 // This is cleared by newextram.
1712 atomic.Xadd(&extraMWaiters, 1)
1713 incr = true
1715 usleep(1)
1716 continue
1718 if atomic.Casuintptr(&extram, old, locked) {
1719 return (*m)(unsafe.Pointer(old))
1721 yield := osyield
1722 yield()
1723 continue
1727 //go:nosplit
1728 //go:nowritebarrierrec
1729 func unlockextra(mp *m) {
1730 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
1733 // execLock serializes exec and clone to avoid bugs or unspecified behaviour
1734 // around exec'ing while creating/destroying threads. See issue #19546.
1735 var execLock rwmutex
1737 // newmHandoff contains a list of m structures that need new OS threads.
1738 // This is used by newm in situations where newm itself can't safely
1739 // start an OS thread.
1740 var newmHandoff struct {
1741 lock mutex
1743 // newm points to a list of M structures that need new OS
1744 // threads. The list is linked through m.schedlink.
1745 newm muintptr
1747 // waiting indicates that wake needs to be notified when an m
1748 // is put on the list.
1749 waiting bool
1750 wake note
1752 // haveTemplateThread indicates that the templateThread has
1753 // been started. This is not protected by lock. Use cas to set
1754 // to 1.
1755 haveTemplateThread uint32
1758 // Create a new m. It will start off with a call to fn, or else the scheduler.
1759 // fn needs to be static and not a heap allocated closure.
1760 // May run with m.p==nil, so write barriers are not allowed.
1761 //go:nowritebarrierrec
1762 func newm(fn func(), _p_ *p) {
1763 mp, _, _ := allocm(_p_, fn, false)
1764 mp.nextp.set(_p_)
1765 mp.sigmask = initSigmask
1766 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
1767 // We're on a locked M or a thread that may have been
1768 // started by C. The kernel state of this thread may
1769 // be strange (the user may have locked it for that
1770 // purpose). We don't want to clone that into another
1771 // thread. Instead, ask a known-good thread to create
1772 // the thread for us.
1774 // This is disabled on Plan 9. See golang.org/issue/22227.
1776 // TODO: This may be unnecessary on Windows, which
1777 // doesn't model thread creation off fork.
1778 lock(&newmHandoff.lock)
1779 if newmHandoff.haveTemplateThread == 0 {
1780 throw("on a locked thread with no template thread")
1782 mp.schedlink = newmHandoff.newm
1783 newmHandoff.newm.set(mp)
1784 if newmHandoff.waiting {
1785 newmHandoff.waiting = false
1786 notewakeup(&newmHandoff.wake)
1788 unlock(&newmHandoff.lock)
1789 return
1791 newm1(mp)
1794 func newm1(mp *m) {
1795 execLock.rlock() // Prevent process clone.
1796 newosproc(mp)
1797 execLock.runlock()
1800 // startTemplateThread starts the template thread if it is not already
1801 // running.
1803 // The calling thread must itself be in a known-good state.
1804 func startTemplateThread() {
1805 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
1806 return
1808 newm(templateThread, nil)
1811 // tmeplateThread is a thread in a known-good state that exists solely
1812 // to start new threads in known-good states when the calling thread
1813 // may not be a a good state.
1815 // Many programs never need this, so templateThread is started lazily
1816 // when we first enter a state that might lead to running on a thread
1817 // in an unknown state.
1819 // templateThread runs on an M without a P, so it must not have write
1820 // barriers.
1822 //go:nowritebarrierrec
1823 func templateThread() {
1824 lock(&sched.lock)
1825 sched.nmsys++
1826 checkdead()
1827 unlock(&sched.lock)
1829 for {
1830 lock(&newmHandoff.lock)
1831 for newmHandoff.newm != 0 {
1832 newm := newmHandoff.newm.ptr()
1833 newmHandoff.newm = 0
1834 unlock(&newmHandoff.lock)
1835 for newm != nil {
1836 next := newm.schedlink.ptr()
1837 newm.schedlink = 0
1838 newm1(newm)
1839 newm = next
1841 lock(&newmHandoff.lock)
1843 newmHandoff.waiting = true
1844 noteclear(&newmHandoff.wake)
1845 unlock(&newmHandoff.lock)
1846 notesleep(&newmHandoff.wake)
1850 // Stops execution of the current m until new work is available.
1851 // Returns with acquired P.
1852 func stopm() {
1853 _g_ := getg()
1855 if _g_.m.locks != 0 {
1856 throw("stopm holding locks")
1858 if _g_.m.p != 0 {
1859 throw("stopm holding p")
1861 if _g_.m.spinning {
1862 throw("stopm spinning")
1865 retry:
1866 lock(&sched.lock)
1867 mput(_g_.m)
1868 unlock(&sched.lock)
1869 notesleep(&_g_.m.park)
1870 noteclear(&_g_.m.park)
1871 if _g_.m.helpgc != 0 {
1872 // helpgc() set _g_.m.p and _g_.m.mcache, so we have a P.
1873 gchelper()
1874 // Undo the effects of helpgc().
1875 _g_.m.helpgc = 0
1876 _g_.m.mcache = nil
1877 _g_.m.p = 0
1878 goto retry
1880 acquirep(_g_.m.nextp.ptr())
1881 _g_.m.nextp = 0
1884 func mspinning() {
1885 // startm's caller incremented nmspinning. Set the new M's spinning.
1886 getg().m.spinning = true
1889 // Schedules some M to run the p (creates an M if necessary).
1890 // If p==nil, tries to get an idle P, if no idle P's does nothing.
1891 // May run with m.p==nil, so write barriers are not allowed.
1892 // If spinning is set, the caller has incremented nmspinning and startm will
1893 // either decrement nmspinning or set m.spinning in the newly started M.
1894 //go:nowritebarrierrec
1895 func startm(_p_ *p, spinning bool) {
1896 lock(&sched.lock)
1897 if _p_ == nil {
1898 _p_ = pidleget()
1899 if _p_ == nil {
1900 unlock(&sched.lock)
1901 if spinning {
1902 // The caller incremented nmspinning, but there are no idle Ps,
1903 // so it's okay to just undo the increment and give up.
1904 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1905 throw("startm: negative nmspinning")
1908 return
1911 mp := mget()
1912 unlock(&sched.lock)
1913 if mp == nil {
1914 var fn func()
1915 if spinning {
1916 // The caller incremented nmspinning, so set m.spinning in the new M.
1917 fn = mspinning
1919 newm(fn, _p_)
1920 return
1922 if mp.spinning {
1923 throw("startm: m is spinning")
1925 if mp.nextp != 0 {
1926 throw("startm: m has p")
1928 if spinning && !runqempty(_p_) {
1929 throw("startm: p has runnable gs")
1931 // The caller incremented nmspinning, so set m.spinning in the new M.
1932 mp.spinning = spinning
1933 mp.nextp.set(_p_)
1934 notewakeup(&mp.park)
1937 // Hands off P from syscall or locked M.
1938 // Always runs without a P, so write barriers are not allowed.
1939 //go:nowritebarrierrec
1940 func handoffp(_p_ *p) {
1941 // handoffp must start an M in any situation where
1942 // findrunnable would return a G to run on _p_.
1944 // if it has local work, start it straight away
1945 if !runqempty(_p_) || sched.runqsize != 0 {
1946 startm(_p_, false)
1947 return
1949 // if it has GC work, start it straight away
1950 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
1951 startm(_p_, false)
1952 return
1954 // no local work, check that there are no spinning/idle M's,
1955 // otherwise our help is not required
1956 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
1957 startm(_p_, true)
1958 return
1960 lock(&sched.lock)
1961 if sched.gcwaiting != 0 {
1962 _p_.status = _Pgcstop
1963 sched.stopwait--
1964 if sched.stopwait == 0 {
1965 notewakeup(&sched.stopnote)
1967 unlock(&sched.lock)
1968 return
1970 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
1971 sched.safePointFn(_p_)
1972 sched.safePointWait--
1973 if sched.safePointWait == 0 {
1974 notewakeup(&sched.safePointNote)
1977 if sched.runqsize != 0 {
1978 unlock(&sched.lock)
1979 startm(_p_, false)
1980 return
1982 // If this is the last running P and nobody is polling network,
1983 // need to wakeup another M to poll network.
1984 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
1985 unlock(&sched.lock)
1986 startm(_p_, false)
1987 return
1989 pidleput(_p_)
1990 unlock(&sched.lock)
1993 // Tries to add one more P to execute G's.
1994 // Called when a G is made runnable (newproc, ready).
1995 func wakep() {
1996 // be conservative about spinning threads
1997 if !atomic.Cas(&sched.nmspinning, 0, 1) {
1998 return
2000 startm(nil, true)
2003 // Stops execution of the current m that is locked to a g until the g is runnable again.
2004 // Returns with acquired P.
2005 func stoplockedm() {
2006 _g_ := getg()
2008 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2009 throw("stoplockedm: inconsistent locking")
2011 if _g_.m.p != 0 {
2012 // Schedule another M to run this p.
2013 _p_ := releasep()
2014 handoffp(_p_)
2016 incidlelocked(1)
2017 // Wait until another thread schedules lockedg again.
2018 notesleep(&_g_.m.park)
2019 noteclear(&_g_.m.park)
2020 status := readgstatus(_g_.m.lockedg.ptr())
2021 if status&^_Gscan != _Grunnable {
2022 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
2023 dumpgstatus(_g_)
2024 throw("stoplockedm: not runnable")
2026 acquirep(_g_.m.nextp.ptr())
2027 _g_.m.nextp = 0
2030 // Schedules the locked m to run the locked gp.
2031 // May run during STW, so write barriers are not allowed.
2032 //go:nowritebarrierrec
2033 func startlockedm(gp *g) {
2034 _g_ := getg()
2036 mp := gp.lockedm.ptr()
2037 if mp == _g_.m {
2038 throw("startlockedm: locked to me")
2040 if mp.nextp != 0 {
2041 throw("startlockedm: m has p")
2043 // directly handoff current P to the locked m
2044 incidlelocked(-1)
2045 _p_ := releasep()
2046 mp.nextp.set(_p_)
2047 notewakeup(&mp.park)
2048 stopm()
2051 // Stops the current m for stopTheWorld.
2052 // Returns when the world is restarted.
2053 func gcstopm() {
2054 _g_ := getg()
2056 if sched.gcwaiting == 0 {
2057 throw("gcstopm: not waiting for gc")
2059 if _g_.m.spinning {
2060 _g_.m.spinning = false
2061 // OK to just drop nmspinning here,
2062 // startTheWorld will unpark threads as necessary.
2063 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2064 throw("gcstopm: negative nmspinning")
2067 _p_ := releasep()
2068 lock(&sched.lock)
2069 _p_.status = _Pgcstop
2070 sched.stopwait--
2071 if sched.stopwait == 0 {
2072 notewakeup(&sched.stopnote)
2074 unlock(&sched.lock)
2075 stopm()
2078 // Schedules gp to run on the current M.
2079 // If inheritTime is true, gp inherits the remaining time in the
2080 // current time slice. Otherwise, it starts a new time slice.
2081 // Never returns.
2083 // Write barriers are allowed because this is called immediately after
2084 // acquiring a P in several places.
2086 //go:yeswritebarrierrec
2087 func execute(gp *g, inheritTime bool) {
2088 _g_ := getg()
2090 casgstatus(gp, _Grunnable, _Grunning)
2091 gp.waitsince = 0
2092 gp.preempt = false
2093 if !inheritTime {
2094 _g_.m.p.ptr().schedtick++
2096 _g_.m.curg = gp
2097 gp.m = _g_.m
2099 // Check whether the profiler needs to be turned on or off.
2100 hz := sched.profilehz
2101 if _g_.m.profilehz != hz {
2102 setThreadCPUProfiler(hz)
2105 if trace.enabled {
2106 // GoSysExit has to happen when we have a P, but before GoStart.
2107 // So we emit it here.
2108 if gp.syscallsp != 0 && gp.sysblocktraced {
2109 traceGoSysExit(gp.sysexitticks)
2111 traceGoStart()
2114 gogo(gp)
2117 // Finds a runnable goroutine to execute.
2118 // Tries to steal from other P's, get g from global queue, poll network.
2119 func findrunnable() (gp *g, inheritTime bool) {
2120 _g_ := getg()
2122 // The conditions here and in handoffp must agree: if
2123 // findrunnable would return a G to run, handoffp must start
2124 // an M.
2126 top:
2127 _p_ := _g_.m.p.ptr()
2128 if sched.gcwaiting != 0 {
2129 gcstopm()
2130 goto top
2132 if _p_.runSafePointFn != 0 {
2133 runSafePointFn()
2135 if fingwait && fingwake {
2136 if gp := wakefing(); gp != nil {
2137 ready(gp, 0, true)
2140 if *cgo_yield != nil {
2141 asmcgocall(*cgo_yield, nil)
2144 // local runq
2145 if gp, inheritTime := runqget(_p_); gp != nil {
2146 return gp, inheritTime
2149 // global runq
2150 if sched.runqsize != 0 {
2151 lock(&sched.lock)
2152 gp := globrunqget(_p_, 0)
2153 unlock(&sched.lock)
2154 if gp != nil {
2155 return gp, false
2159 // Poll network.
2160 // This netpoll is only an optimization before we resort to stealing.
2161 // We can safely skip it if there are no waiters or a thread is blocked
2162 // in netpoll already. If there is any kind of logical race with that
2163 // blocked thread (e.g. it has already returned from netpoll, but does
2164 // not set lastpoll yet), this thread will do blocking netpoll below
2165 // anyway.
2166 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2167 if gp := netpoll(false); gp != nil { // non-blocking
2168 // netpoll returns list of goroutines linked by schedlink.
2169 injectglist(gp.schedlink.ptr())
2170 casgstatus(gp, _Gwaiting, _Grunnable)
2171 if trace.enabled {
2172 traceGoUnpark(gp, 0)
2174 return gp, false
2178 // Steal work from other P's.
2179 procs := uint32(gomaxprocs)
2180 if atomic.Load(&sched.npidle) == procs-1 {
2181 // Either GOMAXPROCS=1 or everybody, except for us, is idle already.
2182 // New work can appear from returning syscall/cgocall, network or timers.
2183 // Neither of that submits to local run queues, so no point in stealing.
2184 goto stop
2186 // If number of spinning M's >= number of busy P's, block.
2187 // This is necessary to prevent excessive CPU consumption
2188 // when GOMAXPROCS>>1 but the program parallelism is low.
2189 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2190 goto stop
2192 if !_g_.m.spinning {
2193 _g_.m.spinning = true
2194 atomic.Xadd(&sched.nmspinning, 1)
2196 for i := 0; i < 4; i++ {
2197 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2198 if sched.gcwaiting != 0 {
2199 goto top
2201 stealRunNextG := i > 2 // first look for ready queues with more than 1 g
2202 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
2203 return gp, false
2208 stop:
2210 // We have nothing to do. If we're in the GC mark phase, can
2211 // safely scan and blacken objects, and have work to do, run
2212 // idle-time marking rather than give up the P.
2213 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
2214 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2215 gp := _p_.gcBgMarkWorker.ptr()
2216 casgstatus(gp, _Gwaiting, _Grunnable)
2217 if trace.enabled {
2218 traceGoUnpark(gp, 0)
2220 return gp, false
2223 // Before we drop our P, make a snapshot of the allp slice,
2224 // which can change underfoot once we no longer block
2225 // safe-points. We don't need to snapshot the contents because
2226 // everything up to cap(allp) is immutable.
2227 allpSnapshot := allp
2229 // return P and block
2230 lock(&sched.lock)
2231 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2232 unlock(&sched.lock)
2233 goto top
2235 if sched.runqsize != 0 {
2236 gp := globrunqget(_p_, 0)
2237 unlock(&sched.lock)
2238 return gp, false
2240 if releasep() != _p_ {
2241 throw("findrunnable: wrong p")
2243 pidleput(_p_)
2244 unlock(&sched.lock)
2246 // Delicate dance: thread transitions from spinning to non-spinning state,
2247 // potentially concurrently with submission of new goroutines. We must
2248 // drop nmspinning first and then check all per-P queues again (with
2249 // #StoreLoad memory barrier in between). If we do it the other way around,
2250 // another thread can submit a goroutine after we've checked all run queues
2251 // but before we drop nmspinning; as the result nobody will unpark a thread
2252 // to run the goroutine.
2253 // If we discover new work below, we need to restore m.spinning as a signal
2254 // for resetspinning to unpark a new worker thread (because there can be more
2255 // than one starving goroutine). However, if after discovering new work
2256 // we also observe no idle Ps, it is OK to just park the current thread:
2257 // the system is fully loaded so no spinning threads are required.
2258 // Also see "Worker thread parking/unparking" comment at the top of the file.
2259 wasSpinning := _g_.m.spinning
2260 if _g_.m.spinning {
2261 _g_.m.spinning = false
2262 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2263 throw("findrunnable: negative nmspinning")
2267 // check all runqueues once again
2268 for _, _p_ := range allpSnapshot {
2269 if !runqempty(_p_) {
2270 lock(&sched.lock)
2271 _p_ = pidleget()
2272 unlock(&sched.lock)
2273 if _p_ != nil {
2274 acquirep(_p_)
2275 if wasSpinning {
2276 _g_.m.spinning = true
2277 atomic.Xadd(&sched.nmspinning, 1)
2279 goto top
2281 break
2285 // Check for idle-priority GC work again.
2286 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
2287 lock(&sched.lock)
2288 _p_ = pidleget()
2289 if _p_ != nil && _p_.gcBgMarkWorker == 0 {
2290 pidleput(_p_)
2291 _p_ = nil
2293 unlock(&sched.lock)
2294 if _p_ != nil {
2295 acquirep(_p_)
2296 if wasSpinning {
2297 _g_.m.spinning = true
2298 atomic.Xadd(&sched.nmspinning, 1)
2300 // Go back to idle GC check.
2301 goto stop
2305 // poll network
2306 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2307 if _g_.m.p != 0 {
2308 throw("findrunnable: netpoll with p")
2310 if _g_.m.spinning {
2311 throw("findrunnable: netpoll with spinning")
2313 gp := netpoll(true) // block until new work is available
2314 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2315 if gp != nil {
2316 lock(&sched.lock)
2317 _p_ = pidleget()
2318 unlock(&sched.lock)
2319 if _p_ != nil {
2320 acquirep(_p_)
2321 injectglist(gp.schedlink.ptr())
2322 casgstatus(gp, _Gwaiting, _Grunnable)
2323 if trace.enabled {
2324 traceGoUnpark(gp, 0)
2326 return gp, false
2328 injectglist(gp)
2331 stopm()
2332 goto top
2335 // pollWork returns true if there is non-background work this P could
2336 // be doing. This is a fairly lightweight check to be used for
2337 // background work loops, like idle GC. It checks a subset of the
2338 // conditions checked by the actual scheduler.
2339 func pollWork() bool {
2340 if sched.runqsize != 0 {
2341 return true
2343 p := getg().m.p.ptr()
2344 if !runqempty(p) {
2345 return true
2347 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2348 if gp := netpoll(false); gp != nil {
2349 injectglist(gp)
2350 return true
2353 return false
2356 func resetspinning() {
2357 _g_ := getg()
2358 if !_g_.m.spinning {
2359 throw("resetspinning: not a spinning m")
2361 _g_.m.spinning = false
2362 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
2363 if int32(nmspinning) < 0 {
2364 throw("findrunnable: negative nmspinning")
2366 // M wakeup policy is deliberately somewhat conservative, so check if we
2367 // need to wakeup another P here. See "Worker thread parking/unparking"
2368 // comment at the top of the file for details.
2369 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
2370 wakep()
2374 // Injects the list of runnable G's into the scheduler.
2375 // Can run concurrently with GC.
2376 func injectglist(glist *g) {
2377 if glist == nil {
2378 return
2380 if trace.enabled {
2381 for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
2382 traceGoUnpark(gp, 0)
2385 lock(&sched.lock)
2386 var n int
2387 for n = 0; glist != nil; n++ {
2388 gp := glist
2389 glist = gp.schedlink.ptr()
2390 casgstatus(gp, _Gwaiting, _Grunnable)
2391 globrunqput(gp)
2393 unlock(&sched.lock)
2394 for ; n != 0 && sched.npidle != 0; n-- {
2395 startm(nil, false)
2399 // One round of scheduler: find a runnable goroutine and execute it.
2400 // Never returns.
2401 func schedule() {
2402 _g_ := getg()
2404 if _g_.m.locks != 0 {
2405 throw("schedule: holding locks")
2408 if _g_.m.lockedg != 0 {
2409 stoplockedm()
2410 execute(_g_.m.lockedg.ptr(), false) // Never returns.
2413 // We should not schedule away from a g that is executing a cgo call,
2414 // since the cgo call is using the m's g0 stack.
2415 if _g_.m.incgo {
2416 throw("schedule: in cgo")
2419 top:
2420 if sched.gcwaiting != 0 {
2421 gcstopm()
2422 goto top
2424 if _g_.m.p.ptr().runSafePointFn != 0 {
2425 runSafePointFn()
2428 var gp *g
2429 var inheritTime bool
2430 if trace.enabled || trace.shutdown {
2431 gp = traceReader()
2432 if gp != nil {
2433 casgstatus(gp, _Gwaiting, _Grunnable)
2434 traceGoUnpark(gp, 0)
2437 if gp == nil && gcBlackenEnabled != 0 {
2438 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
2440 if gp == nil {
2441 // Check the global runnable queue once in a while to ensure fairness.
2442 // Otherwise two goroutines can completely occupy the local runqueue
2443 // by constantly respawning each other.
2444 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
2445 lock(&sched.lock)
2446 gp = globrunqget(_g_.m.p.ptr(), 1)
2447 unlock(&sched.lock)
2450 if gp == nil {
2451 gp, inheritTime = runqget(_g_.m.p.ptr())
2452 if gp != nil && _g_.m.spinning {
2453 throw("schedule: spinning with local work")
2456 // Because gccgo does not implement preemption as a stack check,
2457 // we need to check for preemption here for fairness.
2458 // Otherwise goroutines on the local queue may starve
2459 // goroutines on the global queue.
2460 // Since we preempt by storing the goroutine on the global
2461 // queue, this is the only place we need to check preempt.
2462 // This does not call checkPreempt because gp is not running.
2463 if gp != nil && gp.preempt {
2464 gp.preempt = false
2465 lock(&sched.lock)
2466 globrunqput(gp)
2467 unlock(&sched.lock)
2468 goto top
2471 if gp == nil {
2472 gp, inheritTime = findrunnable() // blocks until work is available
2475 // This thread is going to run a goroutine and is not spinning anymore,
2476 // so if it was marked as spinning we need to reset it now and potentially
2477 // start a new spinning M.
2478 if _g_.m.spinning {
2479 resetspinning()
2482 if gp.lockedm != 0 {
2483 // Hands off own p to the locked m,
2484 // then blocks waiting for a new p.
2485 startlockedm(gp)
2486 goto top
2489 execute(gp, inheritTime)
2492 // dropg removes the association between m and the current goroutine m->curg (gp for short).
2493 // Typically a caller sets gp's status away from Grunning and then
2494 // immediately calls dropg to finish the job. The caller is also responsible
2495 // for arranging that gp will be restarted using ready at an
2496 // appropriate time. After calling dropg and arranging for gp to be
2497 // readied later, the caller can do other work but eventually should
2498 // call schedule to restart the scheduling of goroutines on this m.
2499 func dropg() {
2500 _g_ := getg()
2502 setMNoWB(&_g_.m.curg.m, nil)
2503 setGNoWB(&_g_.m.curg, nil)
2506 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
2507 unlock((*mutex)(lock))
2508 return true
2511 // park continuation on g0.
2512 func park_m(gp *g) {
2513 _g_ := getg()
2515 if trace.enabled {
2516 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
2519 casgstatus(gp, _Grunning, _Gwaiting)
2520 dropg()
2522 if _g_.m.waitunlockf != nil {
2523 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
2524 ok := fn(gp, _g_.m.waitlock)
2525 _g_.m.waitunlockf = nil
2526 _g_.m.waitlock = nil
2527 if !ok {
2528 if trace.enabled {
2529 traceGoUnpark(gp, 2)
2531 casgstatus(gp, _Gwaiting, _Grunnable)
2532 execute(gp, true) // Schedule it back, never returns.
2535 schedule()
2538 func goschedImpl(gp *g) {
2539 status := readgstatus(gp)
2540 if status&^_Gscan != _Grunning {
2541 dumpgstatus(gp)
2542 throw("bad g status")
2544 casgstatus(gp, _Grunning, _Grunnable)
2545 dropg()
2546 lock(&sched.lock)
2547 globrunqput(gp)
2548 unlock(&sched.lock)
2550 schedule()
2553 // Gosched continuation on g0.
2554 func gosched_m(gp *g) {
2555 if trace.enabled {
2556 traceGoSched()
2558 goschedImpl(gp)
2561 // goschedguarded is a forbidden-states-avoided version of gosched_m
2562 func goschedguarded_m(gp *g) {
2564 if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning {
2565 gogo(gp) // never return
2568 if trace.enabled {
2569 traceGoSched()
2571 goschedImpl(gp)
2574 func gopreempt_m(gp *g) {
2575 if trace.enabled {
2576 traceGoPreempt()
2578 goschedImpl(gp)
2581 // Finishes execution of the current goroutine.
2582 func goexit1() {
2583 if trace.enabled {
2584 traceGoEnd()
2586 mcall(goexit0)
2589 // goexit continuation on g0.
2590 func goexit0(gp *g) {
2591 _g_ := getg()
2593 casgstatus(gp, _Grunning, _Gdead)
2594 if isSystemGoroutine(gp) {
2595 atomic.Xadd(&sched.ngsys, -1)
2596 gp.isSystemGoroutine = false
2598 gp.m = nil
2599 locked := gp.lockedm != 0
2600 gp.lockedm = 0
2601 _g_.m.lockedg = 0
2602 gp.entry = nil
2603 gp.paniconfault = false
2604 gp._defer = nil // should be true already but just in case.
2605 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
2606 gp.writebuf = nil
2607 gp.waitreason = ""
2608 gp.param = nil
2609 gp.labels = nil
2610 gp.timer = nil
2612 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
2613 // Flush assist credit to the global pool. This gives
2614 // better information to pacing if the application is
2615 // rapidly creating an exiting goroutines.
2616 scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
2617 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
2618 gp.gcAssistBytes = 0
2621 // Note that gp's stack scan is now "valid" because it has no
2622 // stack.
2623 gp.gcscanvalid = true
2624 dropg()
2626 if _g_.m.lockedInt != 0 {
2627 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
2628 throw("internal lockOSThread error")
2630 _g_.m.lockedExt = 0
2631 gfput(_g_.m.p.ptr(), gp)
2632 if locked {
2633 // The goroutine may have locked this thread because
2634 // it put it in an unusual kernel state. Kill it
2635 // rather than returning it to the thread pool.
2637 // Return to mstart, which will release the P and exit
2638 // the thread.
2639 if GOOS != "plan9" { // See golang.org/issue/22227.
2640 _g_.m.exiting = true
2641 gogo(_g_.m.g0)
2644 schedule()
2647 // The goroutine g is about to enter a system call.
2648 // Record that it's not using the cpu anymore.
2649 // This is called only from the go syscall library and cgocall,
2650 // not from the low-level system calls used by the runtime.
2652 // The entersyscall function is written in C, so that it can save the
2653 // current register context so that the GC will see them.
2654 // It calls reentersyscall.
2656 // Syscall tracing:
2657 // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
2658 // If the syscall does not block, that is it, we do not emit any other events.
2659 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
2660 // when syscall returns we emit traceGoSysExit and when the goroutine starts running
2661 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
2662 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
2663 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
2664 // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
2665 // and we wait for the increment before emitting traceGoSysExit.
2666 // Note that the increment is done even if tracing is not enabled,
2667 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
2669 //go:nosplit
2670 //go:noinline
2671 func reentersyscall(pc, sp uintptr) {
2672 _g_ := getg()
2674 // Disable preemption because during this function g is in Gsyscall status,
2675 // but can have inconsistent g->sched, do not let GC observe it.
2676 _g_.m.locks++
2678 _g_.syscallsp = sp
2679 _g_.syscallpc = pc
2680 casgstatus(_g_, _Grunning, _Gsyscall)
2682 if trace.enabled {
2683 systemstack(traceGoSysCall)
2686 if atomic.Load(&sched.sysmonwait) != 0 {
2687 systemstack(entersyscall_sysmon)
2690 if _g_.m.p.ptr().runSafePointFn != 0 {
2691 // runSafePointFn may stack split if run on this stack
2692 systemstack(runSafePointFn)
2695 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2696 _g_.sysblocktraced = true
2697 _g_.m.mcache = nil
2698 _g_.m.p.ptr().m = 0
2699 atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
2700 if sched.gcwaiting != 0 {
2701 systemstack(entersyscall_gcwait)
2704 _g_.m.locks--
2707 func entersyscall_sysmon() {
2708 lock(&sched.lock)
2709 if atomic.Load(&sched.sysmonwait) != 0 {
2710 atomic.Store(&sched.sysmonwait, 0)
2711 notewakeup(&sched.sysmonnote)
2713 unlock(&sched.lock)
2716 func entersyscall_gcwait() {
2717 _g_ := getg()
2718 _p_ := _g_.m.p.ptr()
2720 lock(&sched.lock)
2721 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
2722 if trace.enabled {
2723 traceGoSysBlock(_p_)
2724 traceProcStop(_p_)
2726 _p_.syscalltick++
2727 if sched.stopwait--; sched.stopwait == 0 {
2728 notewakeup(&sched.stopnote)
2731 unlock(&sched.lock)
2734 // The same as reentersyscall(), but with a hint that the syscall is blocking.
2735 //go:nosplit
2736 func reentersyscallblock(pc, sp uintptr) {
2737 _g_ := getg()
2739 _g_.m.locks++ // see comment in entersyscall
2740 _g_.throwsplit = true
2741 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2742 _g_.sysblocktraced = true
2743 _g_.m.p.ptr().syscalltick++
2745 // Leave SP around for GC and traceback.
2746 _g_.syscallsp = sp
2747 _g_.syscallpc = pc
2748 casgstatus(_g_, _Grunning, _Gsyscall)
2749 systemstack(entersyscallblock_handoff)
2751 _g_.m.locks--
2754 func entersyscallblock_handoff() {
2755 if trace.enabled {
2756 traceGoSysCall()
2757 traceGoSysBlock(getg().m.p.ptr())
2759 handoffp(releasep())
2762 // The goroutine g exited its system call.
2763 // Arrange for it to run on a cpu again.
2764 // This is called only from the go syscall library, not
2765 // from the low-level system calls used by the runtime.
2767 // Write barriers are not allowed because our P may have been stolen.
2769 //go:nosplit
2770 //go:nowritebarrierrec
2771 func exitsyscall(dummy int32) {
2772 _g_ := getg()
2774 _g_.m.locks++ // see comment in entersyscall
2776 _g_.waitsince = 0
2777 oldp := _g_.m.p.ptr()
2778 if exitsyscallfast() {
2779 if _g_.m.mcache == nil {
2780 systemstack(func() {
2781 throw("lost mcache")
2784 if trace.enabled {
2785 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2786 systemstack(traceGoStart)
2789 // There's a cpu for us, so we can run.
2790 _g_.m.p.ptr().syscalltick++
2791 // We need to cas the status and scan before resuming...
2792 casgstatus(_g_, _Gsyscall, _Grunning)
2794 exitsyscallclear(_g_)
2795 _g_.m.locks--
2796 _g_.throwsplit = false
2797 return
2800 _g_.sysexitticks = 0
2801 if trace.enabled {
2802 // Wait till traceGoSysBlock event is emitted.
2803 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2804 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
2805 osyield()
2807 // We can't trace syscall exit right now because we don't have a P.
2808 // Tracing code can invoke write barriers that cannot run without a P.
2809 // So instead we remember the syscall exit time and emit the event
2810 // in execute when we have a P.
2811 _g_.sysexitticks = cputicks()
2814 _g_.m.locks--
2816 // Call the scheduler.
2817 mcall(exitsyscall0)
2819 if _g_.m.mcache == nil {
2820 systemstack(func() {
2821 throw("lost mcache")
2825 // Scheduler returned, so we're allowed to run now.
2826 // Delete the syscallsp information that we left for
2827 // the garbage collector during the system call.
2828 // Must wait until now because until gosched returns
2829 // we don't know for sure that the garbage collector
2830 // is not running.
2831 exitsyscallclear(_g_)
2833 _g_.m.p.ptr().syscalltick++
2834 _g_.throwsplit = false
2837 //go:nosplit
2838 func exitsyscallfast() bool {
2839 _g_ := getg()
2841 // Freezetheworld sets stopwait but does not retake P's.
2842 if sched.stopwait == freezeStopWait {
2843 _g_.m.mcache = nil
2844 _g_.m.p = 0
2845 return false
2848 // Try to re-acquire the last P.
2849 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
2850 // There's a cpu for us, so we can run.
2851 exitsyscallfast_reacquired()
2852 return true
2855 // Try to get any other idle P.
2856 oldp := _g_.m.p.ptr()
2857 _g_.m.mcache = nil
2858 _g_.m.p = 0
2859 if sched.pidle != 0 {
2860 var ok bool
2861 systemstack(func() {
2862 ok = exitsyscallfast_pidle()
2863 if ok && trace.enabled {
2864 if oldp != nil {
2865 // Wait till traceGoSysBlock event is emitted.
2866 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2867 for oldp.syscalltick == _g_.m.syscalltick {
2868 osyield()
2871 traceGoSysExit(0)
2874 if ok {
2875 return true
2878 return false
2881 // exitsyscallfast_reacquired is the exitsyscall path on which this G
2882 // has successfully reacquired the P it was running on before the
2883 // syscall.
2885 // This function is allowed to have write barriers because exitsyscall
2886 // has acquired a P at this point.
2888 //go:yeswritebarrierrec
2889 //go:nosplit
2890 func exitsyscallfast_reacquired() {
2891 _g_ := getg()
2892 _g_.m.mcache = _g_.m.p.ptr().mcache
2893 _g_.m.p.ptr().m.set(_g_.m)
2894 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2895 if trace.enabled {
2896 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed).
2897 // traceGoSysBlock for this syscall was already emitted,
2898 // but here we effectively retake the p from the new syscall running on the same p.
2899 systemstack(func() {
2900 // Denote blocking of the new syscall.
2901 traceGoSysBlock(_g_.m.p.ptr())
2902 // Denote completion of the current syscall.
2903 traceGoSysExit(0)
2906 _g_.m.p.ptr().syscalltick++
2910 func exitsyscallfast_pidle() bool {
2911 lock(&sched.lock)
2912 _p_ := pidleget()
2913 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
2914 atomic.Store(&sched.sysmonwait, 0)
2915 notewakeup(&sched.sysmonnote)
2917 unlock(&sched.lock)
2918 if _p_ != nil {
2919 acquirep(_p_)
2920 return true
2922 return false
2925 // exitsyscall slow path on g0.
2926 // Failed to acquire P, enqueue gp as runnable.
2928 //go:nowritebarrierrec
2929 func exitsyscall0(gp *g) {
2930 _g_ := getg()
2932 casgstatus(gp, _Gsyscall, _Grunnable)
2933 dropg()
2934 lock(&sched.lock)
2935 _p_ := pidleget()
2936 if _p_ == nil {
2937 globrunqput(gp)
2938 } else if atomic.Load(&sched.sysmonwait) != 0 {
2939 atomic.Store(&sched.sysmonwait, 0)
2940 notewakeup(&sched.sysmonnote)
2942 unlock(&sched.lock)
2943 if _p_ != nil {
2944 acquirep(_p_)
2945 execute(gp, false) // Never returns.
2947 if _g_.m.lockedg != 0 {
2948 // Wait until another thread schedules gp and so m again.
2949 stoplockedm()
2950 execute(gp, false) // Never returns.
2952 stopm()
2953 schedule() // Never returns.
2956 // exitsyscallclear clears GC-related information that we only track
2957 // during a syscall.
2958 func exitsyscallclear(gp *g) {
2959 // Garbage collector isn't running (since we are), so okay to
2960 // clear syscallsp.
2961 gp.syscallsp = 0
2963 gp.gcstack = 0
2964 gp.gcnextsp = 0
2965 memclrNoHeapPointers(unsafe.Pointer(&gp.gcregs), unsafe.Sizeof(gp.gcregs))
2968 // Code generated by cgo, and some library code, calls syscall.Entersyscall
2969 // and syscall.Exitsyscall.
2971 //go:linkname syscall_entersyscall syscall.Entersyscall
2972 //go:nosplit
2973 func syscall_entersyscall() {
2974 entersyscall(0)
2977 //go:linkname syscall_exitsyscall syscall.Exitsyscall
2978 //go:nosplit
2979 func syscall_exitsyscall() {
2980 exitsyscall(0)
2983 func beforefork() {
2984 gp := getg().m.curg
2986 // Block signals during a fork, so that the child does not run
2987 // a signal handler before exec if a signal is sent to the process
2988 // group. See issue #18600.
2989 gp.m.locks++
2990 msigsave(gp.m)
2991 sigblock()
2994 // Called from syscall package before fork.
2995 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
2996 //go:nosplit
2997 func syscall_runtime_BeforeFork() {
2998 systemstack(beforefork)
3001 func afterfork() {
3002 gp := getg().m.curg
3004 msigrestore(gp.m.sigmask)
3006 gp.m.locks--
3009 // Called from syscall package after fork in parent.
3010 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
3011 //go:nosplit
3012 func syscall_runtime_AfterFork() {
3013 systemstack(afterfork)
3016 // inForkedChild is true while manipulating signals in the child process.
3017 // This is used to avoid calling libc functions in case we are using vfork.
3018 var inForkedChild bool
3020 // Called from syscall package after fork in child.
3021 // It resets non-sigignored signals to the default handler, and
3022 // restores the signal mask in preparation for the exec.
3024 // Because this might be called during a vfork, and therefore may be
3025 // temporarily sharing address space with the parent process, this must
3026 // not change any global variables or calling into C code that may do so.
3028 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
3029 //go:nosplit
3030 //go:nowritebarrierrec
3031 func syscall_runtime_AfterForkInChild() {
3032 // It's OK to change the global variable inForkedChild here
3033 // because we are going to change it back. There is no race here,
3034 // because if we are sharing address space with the parent process,
3035 // then the parent process can not be running concurrently.
3036 inForkedChild = true
3038 clearSignalHandlers()
3040 // When we are the child we are the only thread running,
3041 // so we know that nothing else has changed gp.m.sigmask.
3042 msigrestore(getg().m.sigmask)
3044 inForkedChild = false
3047 // Called from syscall package before Exec.
3048 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
3049 func syscall_runtime_BeforeExec() {
3050 // Prevent thread creation during exec.
3051 execLock.lock()
3054 // Called from syscall package after Exec.
3055 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
3056 func syscall_runtime_AfterExec() {
3057 execLock.unlock()
3060 // Create a new g running fn passing arg as the single argument.
3061 // Put it on the queue of g's waiting to run.
3062 // The compiler turns a go statement into a call to this.
3063 //go:linkname newproc __go_go
3064 func newproc(fn uintptr, arg unsafe.Pointer) *g {
3065 _g_ := getg()
3067 if fn == 0 {
3068 _g_.m.throwing = -1 // do not dump full stacks
3069 throw("go of nil func value")
3071 _g_.m.locks++ // disable preemption because it can be holding p in a local var
3073 _p_ := _g_.m.p.ptr()
3074 newg := gfget(_p_)
3075 var (
3076 sp unsafe.Pointer
3077 spsize uintptr
3079 if newg == nil {
3080 newg = malg(true, false, &sp, &spsize)
3081 casgstatus(newg, _Gidle, _Gdead)
3082 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
3083 } else {
3084 resetNewG(newg, &sp, &spsize)
3086 newg.traceback = nil
3088 if readgstatus(newg) != _Gdead {
3089 throw("newproc1: new g is not Gdead")
3092 // Store the C function pointer into entryfn, take the address
3093 // of entryfn, convert it to a Go function value, and store
3094 // that in entry.
3095 newg.entryfn = fn
3096 var entry func(unsafe.Pointer)
3097 *(*unsafe.Pointer)(unsafe.Pointer(&entry)) = unsafe.Pointer(&newg.entryfn)
3098 newg.entry = entry
3100 newg.param = arg
3101 newg.gopc = getcallerpc()
3102 newg.startpc = fn
3103 if _g_.m.curg != nil {
3104 newg.labels = _g_.m.curg.labels
3106 if isSystemGoroutine(newg) {
3107 atomic.Xadd(&sched.ngsys, +1)
3109 newg.gcscanvalid = false
3110 casgstatus(newg, _Gdead, _Grunnable)
3112 if _p_.goidcache == _p_.goidcacheend {
3113 // Sched.goidgen is the last allocated id,
3114 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
3115 // At startup sched.goidgen=0, so main goroutine receives goid=1.
3116 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
3117 _p_.goidcache -= _GoidCacheBatch - 1
3118 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
3120 newg.goid = int64(_p_.goidcache)
3121 _p_.goidcache++
3122 if trace.enabled {
3123 traceGoCreate(newg, newg.startpc)
3126 makeGContext(newg, sp, spsize)
3128 runqput(_p_, newg, true)
3130 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
3131 wakep()
3133 _g_.m.locks--
3134 return newg
3137 // expectedSystemGoroutines counts the number of goroutines expected
3138 // to mark themselves as system goroutines. After they mark themselves
3139 // by calling setSystemGoroutine, this is decremented. NumGoroutines
3140 // uses this to wait for all system goroutines to mark themselves
3141 // before it counts them.
3142 var expectedSystemGoroutines uint32
3144 // expectSystemGoroutine is called when starting a goroutine that will
3145 // call setSystemGoroutine. It increments expectedSystemGoroutines.
3146 func expectSystemGoroutine() {
3147 atomic.Xadd(&expectedSystemGoroutines, +1)
3150 // waitForSystemGoroutines waits for all currently expected system
3151 // goroutines to register themselves.
3152 func waitForSystemGoroutines() {
3153 for atomic.Load(&expectedSystemGoroutines) > 0 {
3154 Gosched()
3155 osyield()
3159 // setSystemGoroutine marks this goroutine as a "system goroutine".
3160 // In the gc toolchain this is done by comparing startpc to a list of
3161 // saved special PCs. In gccgo that approach does not work as startpc
3162 // is often a thunk that invokes the real function with arguments,
3163 // so the thunk address never matches the saved special PCs. Instead,
3164 // since there are only a limited number of "system goroutines",
3165 // we force each one to mark itself as special.
3166 func setSystemGoroutine() {
3167 getg().isSystemGoroutine = true
3168 atomic.Xadd(&sched.ngsys, +1)
3169 atomic.Xadd(&expectedSystemGoroutines, -1)
3172 // Put on gfree list.
3173 // If local list is too long, transfer a batch to the global list.
3174 func gfput(_p_ *p, gp *g) {
3175 if readgstatus(gp) != _Gdead {
3176 throw("gfput: bad status (not Gdead)")
3179 gp.schedlink.set(_p_.gfree)
3180 _p_.gfree = gp
3181 _p_.gfreecnt++
3182 if _p_.gfreecnt >= 64 {
3183 lock(&sched.gflock)
3184 for _p_.gfreecnt >= 32 {
3185 _p_.gfreecnt--
3186 gp = _p_.gfree
3187 _p_.gfree = gp.schedlink.ptr()
3188 gp.schedlink.set(sched.gfree)
3189 sched.gfree = gp
3190 sched.ngfree++
3192 unlock(&sched.gflock)
3196 // Get from gfree list.
3197 // If local list is empty, grab a batch from global list.
3198 func gfget(_p_ *p) *g {
3199 retry:
3200 gp := _p_.gfree
3201 if gp == nil && sched.gfree != nil {
3202 lock(&sched.gflock)
3203 for _p_.gfreecnt < 32 {
3204 if sched.gfree != nil {
3205 gp = sched.gfree
3206 sched.gfree = gp.schedlink.ptr()
3207 } else {
3208 break
3210 _p_.gfreecnt++
3211 sched.ngfree--
3212 gp.schedlink.set(_p_.gfree)
3213 _p_.gfree = gp
3215 unlock(&sched.gflock)
3216 goto retry
3218 if gp != nil {
3219 _p_.gfree = gp.schedlink.ptr()
3220 _p_.gfreecnt--
3222 return gp
3225 // Purge all cached G's from gfree list to the global list.
3226 func gfpurge(_p_ *p) {
3227 lock(&sched.gflock)
3228 for _p_.gfreecnt != 0 {
3229 _p_.gfreecnt--
3230 gp := _p_.gfree
3231 _p_.gfree = gp.schedlink.ptr()
3232 gp.schedlink.set(sched.gfree)
3233 sched.gfree = gp
3234 sched.ngfree++
3236 unlock(&sched.gflock)
3239 // Breakpoint executes a breakpoint trap.
3240 func Breakpoint() {
3241 breakpoint()
3244 // dolockOSThread is called by LockOSThread and lockOSThread below
3245 // after they modify m.locked. Do not allow preemption during this call,
3246 // or else the m might be different in this function than in the caller.
3247 //go:nosplit
3248 func dolockOSThread() {
3249 _g_ := getg()
3250 _g_.m.lockedg.set(_g_)
3251 _g_.lockedm.set(_g_.m)
3254 //go:nosplit
3256 // LockOSThread wires the calling goroutine to its current operating system thread.
3257 // The calling goroutine will always execute in that thread,
3258 // and no other goroutine will execute in it,
3259 // until the calling goroutine has made as many calls to
3260 // UnlockOSThread as to LockOSThread.
3261 // If the calling goroutine exits without unlocking the thread,
3262 // the thread will be terminated.
3264 // A goroutine should call LockOSThread before calling OS services or
3265 // non-Go library functions that depend on per-thread state.
3266 func LockOSThread() {
3267 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
3268 // If we need to start a new thread from the locked
3269 // thread, we need the template thread. Start it now
3270 // while we're in a known-good state.
3271 startTemplateThread()
3273 _g_ := getg()
3274 _g_.m.lockedExt++
3275 if _g_.m.lockedExt == 0 {
3276 _g_.m.lockedExt--
3277 panic("LockOSThread nesting overflow")
3279 dolockOSThread()
3282 //go:nosplit
3283 func lockOSThread() {
3284 getg().m.lockedInt++
3285 dolockOSThread()
3288 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
3289 // after they update m->locked. Do not allow preemption during this call,
3290 // or else the m might be in different in this function than in the caller.
3291 //go:nosplit
3292 func dounlockOSThread() {
3293 _g_ := getg()
3294 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
3295 return
3297 _g_.m.lockedg = 0
3298 _g_.lockedm = 0
3301 //go:nosplit
3303 // UnlockOSThread undoes an earlier call to LockOSThread.
3304 // If this drops the number of active LockOSThread calls on the
3305 // calling goroutine to zero, it unwires the calling goroutine from
3306 // its fixed operating system thread.
3307 // If there are no active LockOSThread calls, this is a no-op.
3309 // Before calling UnlockOSThread, the caller must ensure that the OS
3310 // thread is suitable for running other goroutines. If the caller made
3311 // any permanent changes to the state of the thread that would affect
3312 // other goroutines, it should not call this function and thus leave
3313 // the goroutine locked to the OS thread until the goroutine (and
3314 // hence the thread) exits.
3315 func UnlockOSThread() {
3316 _g_ := getg()
3317 if _g_.m.lockedExt == 0 {
3318 return
3320 _g_.m.lockedExt--
3321 dounlockOSThread()
3324 //go:nosplit
3325 func unlockOSThread() {
3326 _g_ := getg()
3327 if _g_.m.lockedInt == 0 {
3328 systemstack(badunlockosthread)
3330 _g_.m.lockedInt--
3331 dounlockOSThread()
3334 func badunlockosthread() {
3335 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
3338 func gcount() int32 {
3339 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
3340 for _, _p_ := range allp {
3341 n -= _p_.gfreecnt
3344 // All these variables can be changed concurrently, so the result can be inconsistent.
3345 // But at least the current goroutine is running.
3346 if n < 1 {
3347 n = 1
3349 return n
3352 func mcount() int32 {
3353 return int32(sched.mnext - sched.nmfreed)
3356 var prof struct {
3357 signalLock uint32
3358 hz int32
3361 func _System() { _System() }
3362 func _ExternalCode() { _ExternalCode() }
3363 func _LostExternalCode() { _LostExternalCode() }
3364 func _GC() { _GC() }
3365 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
3367 // Counts SIGPROFs received while in atomic64 critical section, on mips{,le}
3368 var lostAtomic64Count uint64
3370 var _SystemPC = funcPC(_System)
3371 var _ExternalCodePC = funcPC(_ExternalCode)
3372 var _GCPC = funcPC(_GC)
3374 // Called if we receive a SIGPROF signal.
3375 // Called by the signal handler, may run during STW.
3376 //go:nowritebarrierrec
3377 func sigprof(pc uintptr, gp *g, mp *m) {
3378 if prof.hz == 0 {
3379 return
3382 // Profiling runs concurrently with GC, so it must not allocate.
3383 // Set a trap in case the code does allocate.
3384 // Note that on windows, one thread takes profiles of all the
3385 // other threads, so mp is usually not getg().m.
3386 // In fact mp may not even be stopped.
3387 // See golang.org/issue/17165.
3388 getg().m.mallocing++
3390 traceback := true
3392 // If SIGPROF arrived while already fetching runtime callers
3393 // we can have trouble on older systems because the unwind
3394 // library calls dl_iterate_phdr which was not reentrant in
3395 // the past. alreadyInCallers checks for that.
3396 if gp == nil || alreadyInCallers() {
3397 traceback = false
3400 var stk [maxCPUProfStack]uintptr
3401 n := 0
3402 if traceback {
3403 var stklocs [maxCPUProfStack]location
3404 n = callers(0, stklocs[:])
3406 for i := 0; i < n; i++ {
3407 stk[i] = stklocs[i].pc
3411 if n <= 0 {
3412 // Normal traceback is impossible or has failed.
3413 // Account it against abstract "System" or "GC".
3414 n = 2
3415 stk[0] = pc
3416 if mp.preemptoff != "" || mp.helpgc != 0 {
3417 stk[1] = _GCPC + sys.PCQuantum
3418 } else {
3419 stk[1] = _SystemPC + sys.PCQuantum
3423 if prof.hz != 0 {
3424 if (GOARCH == "mips" || GOARCH == "mipsle") && lostAtomic64Count > 0 {
3425 cpuprof.addLostAtomic64(lostAtomic64Count)
3426 lostAtomic64Count = 0
3428 cpuprof.add(gp, stk[:n])
3430 getg().m.mallocing--
3433 // Use global arrays rather than using up lots of stack space in the
3434 // signal handler. This is safe since while we are executing a SIGPROF
3435 // signal other SIGPROF signals are blocked.
3436 var nonprofGoStklocs [maxCPUProfStack]location
3437 var nonprofGoStk [maxCPUProfStack]uintptr
3439 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
3440 // and the signal handler collected a stack trace in sigprofCallers.
3441 // When this is called, sigprofCallersUse will be non-zero.
3442 // g is nil, and what we can do is very limited.
3443 //go:nosplit
3444 //go:nowritebarrierrec
3445 func sigprofNonGo(pc uintptr) {
3446 if prof.hz != 0 {
3447 n := callers(0, nonprofGoStklocs[:])
3449 for i := 0; i < n; i++ {
3450 nonprofGoStk[i] = nonprofGoStklocs[i].pc
3453 if n <= 0 {
3454 n = 2
3455 nonprofGoStk[0] = pc
3456 nonprofGoStk[1] = _ExternalCodePC + sys.PCQuantum
3459 cpuprof.addNonGo(nonprofGoStk[:n])
3463 // sigprofNonGoPC is called when a profiling signal arrived on a
3464 // non-Go thread and we have a single PC value, not a stack trace.
3465 // g is nil, and what we can do is very limited.
3466 //go:nosplit
3467 //go:nowritebarrierrec
3468 func sigprofNonGoPC(pc uintptr) {
3469 if prof.hz != 0 {
3470 stk := []uintptr{
3472 funcPC(_ExternalCode) + sys.PCQuantum,
3474 cpuprof.addNonGo(stk)
3478 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
3479 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
3480 func setcpuprofilerate(hz int32) {
3481 // Force sane arguments.
3482 if hz < 0 {
3483 hz = 0
3486 // Disable preemption, otherwise we can be rescheduled to another thread
3487 // that has profiling enabled.
3488 _g_ := getg()
3489 _g_.m.locks++
3491 // Stop profiler on this thread so that it is safe to lock prof.
3492 // if a profiling signal came in while we had prof locked,
3493 // it would deadlock.
3494 setThreadCPUProfiler(0)
3496 for !atomic.Cas(&prof.signalLock, 0, 1) {
3497 osyield()
3499 if prof.hz != hz {
3500 setProcessCPUProfiler(hz)
3501 prof.hz = hz
3503 atomic.Store(&prof.signalLock, 0)
3505 lock(&sched.lock)
3506 sched.profilehz = hz
3507 unlock(&sched.lock)
3509 if hz != 0 {
3510 setThreadCPUProfiler(hz)
3513 _g_.m.locks--
3516 // Change number of processors. The world is stopped, sched is locked.
3517 // gcworkbufs are not being modified by either the GC or
3518 // the write barrier code.
3519 // Returns list of Ps with local work, they need to be scheduled by the caller.
3520 func procresize(nprocs int32) *p {
3521 old := gomaxprocs
3522 if old < 0 || nprocs <= 0 {
3523 throw("procresize: invalid arg")
3525 if trace.enabled {
3526 traceGomaxprocs(nprocs)
3529 // update statistics
3530 now := nanotime()
3531 if sched.procresizetime != 0 {
3532 sched.totaltime += int64(old) * (now - sched.procresizetime)
3534 sched.procresizetime = now
3536 // Grow allp if necessary.
3537 if nprocs > int32(len(allp)) {
3538 // Synchronize with retake, which could be running
3539 // concurrently since it doesn't run on a P.
3540 lock(&allpLock)
3541 if nprocs <= int32(cap(allp)) {
3542 allp = allp[:nprocs]
3543 } else {
3544 nallp := make([]*p, nprocs)
3545 // Copy everything up to allp's cap so we
3546 // never lose old allocated Ps.
3547 copy(nallp, allp[:cap(allp)])
3548 allp = nallp
3550 unlock(&allpLock)
3553 // initialize new P's
3554 for i := int32(0); i < nprocs; i++ {
3555 pp := allp[i]
3556 if pp == nil {
3557 pp = new(p)
3558 pp.id = i
3559 pp.status = _Pgcstop
3560 pp.sudogcache = pp.sudogbuf[:0]
3561 pp.deferpool = pp.deferpoolbuf[:0]
3562 pp.wbBuf.reset()
3563 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
3565 if pp.mcache == nil {
3566 if old == 0 && i == 0 {
3567 if getg().m.mcache == nil {
3568 throw("missing mcache?")
3570 pp.mcache = getg().m.mcache // bootstrap
3571 } else {
3572 pp.mcache = allocmcache()
3577 // free unused P's
3578 for i := nprocs; i < old; i++ {
3579 p := allp[i]
3580 if trace.enabled && p == getg().m.p.ptr() {
3581 // moving to p[0], pretend that we were descheduled
3582 // and then scheduled again to keep the trace sane.
3583 traceGoSched()
3584 traceProcStop(p)
3586 // move all runnable goroutines to the global queue
3587 for p.runqhead != p.runqtail {
3588 // pop from tail of local queue
3589 p.runqtail--
3590 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr()
3591 // push onto head of global queue
3592 globrunqputhead(gp)
3594 if p.runnext != 0 {
3595 globrunqputhead(p.runnext.ptr())
3596 p.runnext = 0
3598 // if there's a background worker, make it runnable and put
3599 // it on the global queue so it can clean itself up
3600 if gp := p.gcBgMarkWorker.ptr(); gp != nil {
3601 casgstatus(gp, _Gwaiting, _Grunnable)
3602 if trace.enabled {
3603 traceGoUnpark(gp, 0)
3605 globrunqput(gp)
3606 // This assignment doesn't race because the
3607 // world is stopped.
3608 p.gcBgMarkWorker.set(nil)
3610 // Flush p's write barrier buffer.
3611 if gcphase != _GCoff {
3612 wbBufFlush1(p)
3613 p.gcw.dispose()
3615 for i := range p.sudogbuf {
3616 p.sudogbuf[i] = nil
3618 p.sudogcache = p.sudogbuf[:0]
3619 for i := range p.deferpoolbuf {
3620 p.deferpoolbuf[i] = nil
3622 p.deferpool = p.deferpoolbuf[:0]
3623 freemcache(p.mcache)
3624 p.mcache = nil
3625 gfpurge(p)
3626 traceProcFree(p)
3627 p.gcAssistTime = 0
3628 p.status = _Pdead
3629 // can't free P itself because it can be referenced by an M in syscall
3632 // Trim allp.
3633 if int32(len(allp)) != nprocs {
3634 lock(&allpLock)
3635 allp = allp[:nprocs]
3636 unlock(&allpLock)
3639 _g_ := getg()
3640 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
3641 // continue to use the current P
3642 _g_.m.p.ptr().status = _Prunning
3643 } else {
3644 // release the current P and acquire allp[0]
3645 if _g_.m.p != 0 {
3646 _g_.m.p.ptr().m = 0
3648 _g_.m.p = 0
3649 _g_.m.mcache = nil
3650 p := allp[0]
3651 p.m = 0
3652 p.status = _Pidle
3653 acquirep(p)
3654 if trace.enabled {
3655 traceGoStart()
3658 var runnablePs *p
3659 for i := nprocs - 1; i >= 0; i-- {
3660 p := allp[i]
3661 if _g_.m.p.ptr() == p {
3662 continue
3664 p.status = _Pidle
3665 if runqempty(p) {
3666 pidleput(p)
3667 } else {
3668 p.m.set(mget())
3669 p.link.set(runnablePs)
3670 runnablePs = p
3673 stealOrder.reset(uint32(nprocs))
3674 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
3675 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
3676 return runnablePs
3679 // Associate p and the current m.
3681 // This function is allowed to have write barriers even if the caller
3682 // isn't because it immediately acquires _p_.
3684 //go:yeswritebarrierrec
3685 func acquirep(_p_ *p) {
3686 // Do the part that isn't allowed to have write barriers.
3687 acquirep1(_p_)
3689 // have p; write barriers now allowed
3690 _g_ := getg()
3691 _g_.m.mcache = _p_.mcache
3693 if trace.enabled {
3694 traceProcStart()
3698 // acquirep1 is the first step of acquirep, which actually acquires
3699 // _p_. This is broken out so we can disallow write barriers for this
3700 // part, since we don't yet have a P.
3702 //go:nowritebarrierrec
3703 func acquirep1(_p_ *p) {
3704 _g_ := getg()
3706 if _g_.m.p != 0 || _g_.m.mcache != nil {
3707 throw("acquirep: already in go")
3709 if _p_.m != 0 || _p_.status != _Pidle {
3710 id := int64(0)
3711 if _p_.m != 0 {
3712 id = _p_.m.ptr().id
3714 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
3715 throw("acquirep: invalid p state")
3717 _g_.m.p.set(_p_)
3718 _p_.m.set(_g_.m)
3719 _p_.status = _Prunning
3722 // Disassociate p and the current m.
3723 func releasep() *p {
3724 _g_ := getg()
3726 if _g_.m.p == 0 || _g_.m.mcache == nil {
3727 throw("releasep: invalid arg")
3729 _p_ := _g_.m.p.ptr()
3730 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
3731 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
3732 throw("releasep: invalid p state")
3734 if trace.enabled {
3735 traceProcStop(_g_.m.p.ptr())
3737 _g_.m.p = 0
3738 _g_.m.mcache = nil
3739 _p_.m = 0
3740 _p_.status = _Pidle
3741 return _p_
3744 func incidlelocked(v int32) {
3745 lock(&sched.lock)
3746 sched.nmidlelocked += v
3747 if v > 0 {
3748 checkdead()
3750 unlock(&sched.lock)
3753 // Check for deadlock situation.
3754 // The check is based on number of running M's, if 0 -> deadlock.
3755 // sched.lock must be held.
3756 func checkdead() {
3757 // For -buildmode=c-shared or -buildmode=c-archive it's OK if
3758 // there are no running goroutines. The calling program is
3759 // assumed to be running.
3760 if islibrary || isarchive {
3761 return
3764 // If we are dying because of a signal caught on an already idle thread,
3765 // freezetheworld will cause all running threads to block.
3766 // And runtime will essentially enter into deadlock state,
3767 // except that there is a thread that will call exit soon.
3768 if panicking > 0 {
3769 return
3772 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
3773 if run > 0 {
3774 return
3776 if run < 0 {
3777 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
3778 throw("checkdead: inconsistent counts")
3781 grunning := 0
3782 lock(&allglock)
3783 for i := 0; i < len(allgs); i++ {
3784 gp := allgs[i]
3785 if isSystemGoroutine(gp) {
3786 continue
3788 s := readgstatus(gp)
3789 switch s &^ _Gscan {
3790 case _Gwaiting:
3791 grunning++
3792 case _Grunnable,
3793 _Grunning,
3794 _Gsyscall:
3795 unlock(&allglock)
3796 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
3797 throw("checkdead: runnable g")
3800 unlock(&allglock)
3801 if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
3802 throw("no goroutines (main called runtime.Goexit) - deadlock!")
3805 // Maybe jump time forward for playground.
3806 gp := timejump()
3807 if gp != nil {
3808 casgstatus(gp, _Gwaiting, _Grunnable)
3809 globrunqput(gp)
3810 _p_ := pidleget()
3811 if _p_ == nil {
3812 throw("checkdead: no p for timer")
3814 mp := mget()
3815 if mp == nil {
3816 // There should always be a free M since
3817 // nothing is running.
3818 throw("checkdead: no m for timer")
3820 mp.nextp.set(_p_)
3821 notewakeup(&mp.park)
3822 return
3825 getg().m.throwing = -1 // do not dump full stacks
3826 throw("all goroutines are asleep - deadlock!")
3829 // forcegcperiod is the maximum time in nanoseconds between garbage
3830 // collections. If we go this long without a garbage collection, one
3831 // is forced to run.
3833 // This is a variable for testing purposes. It normally doesn't change.
3834 var forcegcperiod int64 = 2 * 60 * 1e9
3836 // Always runs without a P, so write barriers are not allowed.
3838 //go:nowritebarrierrec
3839 func sysmon() {
3840 lock(&sched.lock)
3841 sched.nmsys++
3842 checkdead()
3843 unlock(&sched.lock)
3845 // If a heap span goes unused for 5 minutes after a garbage collection,
3846 // we hand it back to the operating system.
3847 scavengelimit := int64(5 * 60 * 1e9)
3849 if debug.scavenge > 0 {
3850 // Scavenge-a-lot for testing.
3851 forcegcperiod = 10 * 1e6
3852 scavengelimit = 20 * 1e6
3855 lastscavenge := nanotime()
3856 nscavenge := 0
3858 lasttrace := int64(0)
3859 idle := 0 // how many cycles in succession we had not wokeup somebody
3860 delay := uint32(0)
3861 for {
3862 if idle == 0 { // start with 20us sleep...
3863 delay = 20
3864 } else if idle > 50 { // start doubling the sleep after 1ms...
3865 delay *= 2
3867 if delay > 10*1000 { // up to 10ms
3868 delay = 10 * 1000
3870 usleep(delay)
3871 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
3872 lock(&sched.lock)
3873 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
3874 atomic.Store(&sched.sysmonwait, 1)
3875 unlock(&sched.lock)
3876 // Make wake-up period small enough
3877 // for the sampling to be correct.
3878 maxsleep := forcegcperiod / 2
3879 if scavengelimit < forcegcperiod {
3880 maxsleep = scavengelimit / 2
3882 shouldRelax := true
3883 if osRelaxMinNS > 0 {
3884 next := timeSleepUntil()
3885 now := nanotime()
3886 if next-now < osRelaxMinNS {
3887 shouldRelax = false
3890 if shouldRelax {
3891 osRelax(true)
3893 notetsleep(&sched.sysmonnote, maxsleep)
3894 if shouldRelax {
3895 osRelax(false)
3897 lock(&sched.lock)
3898 atomic.Store(&sched.sysmonwait, 0)
3899 noteclear(&sched.sysmonnote)
3900 idle = 0
3901 delay = 20
3903 unlock(&sched.lock)
3905 // trigger libc interceptors if needed
3906 if *cgo_yield != nil {
3907 asmcgocall(*cgo_yield, nil)
3909 // poll network if not polled for more than 10ms
3910 lastpoll := int64(atomic.Load64(&sched.lastpoll))
3911 now := nanotime()
3912 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
3913 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
3914 gp := netpoll(false) // non-blocking - returns list of goroutines
3915 if gp != nil {
3916 // Need to decrement number of idle locked M's
3917 // (pretending that one more is running) before injectglist.
3918 // Otherwise it can lead to the following situation:
3919 // injectglist grabs all P's but before it starts M's to run the P's,
3920 // another M returns from syscall, finishes running its G,
3921 // observes that there is no work to do and no other running M's
3922 // and reports deadlock.
3923 incidlelocked(-1)
3924 injectglist(gp)
3925 incidlelocked(1)
3928 // retake P's blocked in syscalls
3929 // and preempt long running G's
3930 if retake(now) != 0 {
3931 idle = 0
3932 } else {
3933 idle++
3935 // check if we need to force a GC
3936 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
3937 lock(&forcegc.lock)
3938 forcegc.idle = 0
3939 forcegc.g.schedlink = 0
3940 injectglist(forcegc.g)
3941 unlock(&forcegc.lock)
3943 // scavenge heap once in a while
3944 if lastscavenge+scavengelimit/2 < now {
3945 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
3946 lastscavenge = now
3947 nscavenge++
3949 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
3950 lasttrace = now
3951 schedtrace(debug.scheddetail > 0)
3956 type sysmontick struct {
3957 schedtick uint32
3958 schedwhen int64
3959 syscalltick uint32
3960 syscallwhen int64
3963 // forcePreemptNS is the time slice given to a G before it is
3964 // preempted.
3965 const forcePreemptNS = 10 * 1000 * 1000 // 10ms
3967 func retake(now int64) uint32 {
3968 n := 0
3969 // Prevent allp slice changes. This lock will be completely
3970 // uncontended unless we're already stopping the world.
3971 lock(&allpLock)
3972 // We can't use a range loop over allp because we may
3973 // temporarily drop the allpLock. Hence, we need to re-fetch
3974 // allp each time around the loop.
3975 for i := 0; i < len(allp); i++ {
3976 _p_ := allp[i]
3977 if _p_ == nil {
3978 // This can happen if procresize has grown
3979 // allp but not yet created new Ps.
3980 continue
3982 pd := &_p_.sysmontick
3983 s := _p_.status
3984 if s == _Psyscall {
3985 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
3986 t := int64(_p_.syscalltick)
3987 if int64(pd.syscalltick) != t {
3988 pd.syscalltick = uint32(t)
3989 pd.syscallwhen = now
3990 continue
3992 // On the one hand we don't want to retake Ps if there is no other work to do,
3993 // but on the other hand we want to retake them eventually
3994 // because they can prevent the sysmon thread from deep sleep.
3995 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
3996 continue
3998 // Drop allpLock so we can take sched.lock.
3999 unlock(&allpLock)
4000 // Need to decrement number of idle locked M's
4001 // (pretending that one more is running) before the CAS.
4002 // Otherwise the M from which we retake can exit the syscall,
4003 // increment nmidle and report deadlock.
4004 incidlelocked(-1)
4005 if atomic.Cas(&_p_.status, s, _Pidle) {
4006 if trace.enabled {
4007 traceGoSysBlock(_p_)
4008 traceProcStop(_p_)
4011 _p_.syscalltick++
4012 handoffp(_p_)
4014 incidlelocked(1)
4015 lock(&allpLock)
4016 } else if s == _Prunning {
4017 // Preempt G if it's running for too long.
4018 t := int64(_p_.schedtick)
4019 if int64(pd.schedtick) != t {
4020 pd.schedtick = uint32(t)
4021 pd.schedwhen = now
4022 continue
4024 if pd.schedwhen+forcePreemptNS > now {
4025 continue
4027 preemptone(_p_)
4030 unlock(&allpLock)
4031 return uint32(n)
4034 // Tell all goroutines that they have been preempted and they should stop.
4035 // This function is purely best-effort. It can fail to inform a goroutine if a
4036 // processor just started running it.
4037 // No locks need to be held.
4038 // Returns true if preemption request was issued to at least one goroutine.
4039 func preemptall() bool {
4040 res := false
4041 for _, _p_ := range allp {
4042 if _p_.status != _Prunning {
4043 continue
4045 if preemptone(_p_) {
4046 res = true
4049 return res
4052 // Tell the goroutine running on processor P to stop.
4053 // This function is purely best-effort. It can incorrectly fail to inform the
4054 // goroutine. It can send inform the wrong goroutine. Even if it informs the
4055 // correct goroutine, that goroutine might ignore the request if it is
4056 // simultaneously executing newstack.
4057 // No lock needs to be held.
4058 // Returns true if preemption request was issued.
4059 // The actual preemption will happen at some point in the future
4060 // and will be indicated by the gp->status no longer being
4061 // Grunning
4062 func preemptone(_p_ *p) bool {
4063 mp := _p_.m.ptr()
4064 if mp == nil || mp == getg().m {
4065 return false
4067 gp := mp.curg
4068 if gp == nil || gp == mp.g0 {
4069 return false
4072 gp.preempt = true
4074 // At this point the gc implementation sets gp.stackguard0 to
4075 // a value that causes the goroutine to suspend itself.
4076 // gccgo has no support for this, and it's hard to support.
4077 // The split stack code reads a value from its TCB.
4078 // We have no way to set a value in the TCB of a different thread.
4079 // And, of course, not all systems support split stack anyhow.
4080 // Checking the field in the g is expensive, since it requires
4081 // loading the g from TLS. The best mechanism is likely to be
4082 // setting a global variable and figuring out a way to efficiently
4083 // check that global variable.
4085 // For now we check gp.preempt in schedule and mallocgc,
4086 // which is at least better than doing nothing at all.
4088 return true
4091 var starttime int64
4093 func schedtrace(detailed bool) {
4094 now := nanotime()
4095 if starttime == 0 {
4096 starttime = now
4099 lock(&sched.lock)
4100 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
4101 if detailed {
4102 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
4104 // We must be careful while reading data from P's, M's and G's.
4105 // Even if we hold schedlock, most data can be changed concurrently.
4106 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
4107 for i, _p_ := range allp {
4108 mp := _p_.m.ptr()
4109 h := atomic.Load(&_p_.runqhead)
4110 t := atomic.Load(&_p_.runqtail)
4111 if detailed {
4112 id := int64(-1)
4113 if mp != nil {
4114 id = mp.id
4116 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
4117 } else {
4118 // In non-detailed mode format lengths of per-P run queues as:
4119 // [len1 len2 len3 len4]
4120 print(" ")
4121 if i == 0 {
4122 print("[")
4124 print(t - h)
4125 if i == len(allp)-1 {
4126 print("]\n")
4131 if !detailed {
4132 unlock(&sched.lock)
4133 return
4136 for mp := allm; mp != nil; mp = mp.alllink {
4137 _p_ := mp.p.ptr()
4138 gp := mp.curg
4139 lockedg := mp.lockedg.ptr()
4140 id1 := int32(-1)
4141 if _p_ != nil {
4142 id1 = _p_.id
4144 id2 := int64(-1)
4145 if gp != nil {
4146 id2 = gp.goid
4148 id3 := int64(-1)
4149 if lockedg != nil {
4150 id3 = lockedg.goid
4152 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
4155 lock(&allglock)
4156 for gi := 0; gi < len(allgs); gi++ {
4157 gp := allgs[gi]
4158 mp := gp.m
4159 lockedm := gp.lockedm.ptr()
4160 id1 := int64(-1)
4161 if mp != nil {
4162 id1 = mp.id
4164 id2 := int64(-1)
4165 if lockedm != nil {
4166 id2 = lockedm.id
4168 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
4170 unlock(&allglock)
4171 unlock(&sched.lock)
4174 // Put mp on midle list.
4175 // Sched must be locked.
4176 // May run during STW, so write barriers are not allowed.
4177 //go:nowritebarrierrec
4178 func mput(mp *m) {
4179 mp.schedlink = sched.midle
4180 sched.midle.set(mp)
4181 sched.nmidle++
4182 checkdead()
4185 // Try to get an m from midle list.
4186 // Sched must be locked.
4187 // May run during STW, so write barriers are not allowed.
4188 //go:nowritebarrierrec
4189 func mget() *m {
4190 mp := sched.midle.ptr()
4191 if mp != nil {
4192 sched.midle = mp.schedlink
4193 sched.nmidle--
4195 return mp
4198 // Put gp on the global runnable queue.
4199 // Sched must be locked.
4200 // May run during STW, so write barriers are not allowed.
4201 //go:nowritebarrierrec
4202 func globrunqput(gp *g) {
4203 gp.schedlink = 0
4204 if sched.runqtail != 0 {
4205 sched.runqtail.ptr().schedlink.set(gp)
4206 } else {
4207 sched.runqhead.set(gp)
4209 sched.runqtail.set(gp)
4210 sched.runqsize++
4213 // Put gp at the head of the global runnable queue.
4214 // Sched must be locked.
4215 // May run during STW, so write barriers are not allowed.
4216 //go:nowritebarrierrec
4217 func globrunqputhead(gp *g) {
4218 gp.schedlink = sched.runqhead
4219 sched.runqhead.set(gp)
4220 if sched.runqtail == 0 {
4221 sched.runqtail.set(gp)
4223 sched.runqsize++
4226 // Put a batch of runnable goroutines on the global runnable queue.
4227 // Sched must be locked.
4228 func globrunqputbatch(ghead *g, gtail *g, n int32) {
4229 gtail.schedlink = 0
4230 if sched.runqtail != 0 {
4231 sched.runqtail.ptr().schedlink.set(ghead)
4232 } else {
4233 sched.runqhead.set(ghead)
4235 sched.runqtail.set(gtail)
4236 sched.runqsize += n
4239 // Try get a batch of G's from the global runnable queue.
4240 // Sched must be locked.
4241 func globrunqget(_p_ *p, max int32) *g {
4242 if sched.runqsize == 0 {
4243 return nil
4246 n := sched.runqsize/gomaxprocs + 1
4247 if n > sched.runqsize {
4248 n = sched.runqsize
4250 if max > 0 && n > max {
4251 n = max
4253 if n > int32(len(_p_.runq))/2 {
4254 n = int32(len(_p_.runq)) / 2
4257 sched.runqsize -= n
4258 if sched.runqsize == 0 {
4259 sched.runqtail = 0
4262 gp := sched.runqhead.ptr()
4263 sched.runqhead = gp.schedlink
4265 for ; n > 0; n-- {
4266 gp1 := sched.runqhead.ptr()
4267 sched.runqhead = gp1.schedlink
4268 runqput(_p_, gp1, false)
4270 return gp
4273 // Put p to on _Pidle list.
4274 // Sched must be locked.
4275 // May run during STW, so write barriers are not allowed.
4276 //go:nowritebarrierrec
4277 func pidleput(_p_ *p) {
4278 if !runqempty(_p_) {
4279 throw("pidleput: P has non-empty run queue")
4281 _p_.link = sched.pidle
4282 sched.pidle.set(_p_)
4283 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
4286 // Try get a p from _Pidle list.
4287 // Sched must be locked.
4288 // May run during STW, so write barriers are not allowed.
4289 //go:nowritebarrierrec
4290 func pidleget() *p {
4291 _p_ := sched.pidle.ptr()
4292 if _p_ != nil {
4293 sched.pidle = _p_.link
4294 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
4296 return _p_
4299 // runqempty returns true if _p_ has no Gs on its local run queue.
4300 // It never returns true spuriously.
4301 func runqempty(_p_ *p) bool {
4302 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
4303 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
4304 // Simply observing that runqhead == runqtail and then observing that runqnext == nil
4305 // does not mean the queue is empty.
4306 for {
4307 head := atomic.Load(&_p_.runqhead)
4308 tail := atomic.Load(&_p_.runqtail)
4309 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
4310 if tail == atomic.Load(&_p_.runqtail) {
4311 return head == tail && runnext == 0
4316 // To shake out latent assumptions about scheduling order,
4317 // we introduce some randomness into scheduling decisions
4318 // when running with the race detector.
4319 // The need for this was made obvious by changing the
4320 // (deterministic) scheduling order in Go 1.5 and breaking
4321 // many poorly-written tests.
4322 // With the randomness here, as long as the tests pass
4323 // consistently with -race, they shouldn't have latent scheduling
4324 // assumptions.
4325 const randomizeScheduler = raceenabled
4327 // runqput tries to put g on the local runnable queue.
4328 // If next if false, runqput adds g to the tail of the runnable queue.
4329 // If next is true, runqput puts g in the _p_.runnext slot.
4330 // If the run queue is full, runnext puts g on the global queue.
4331 // Executed only by the owner P.
4332 func runqput(_p_ *p, gp *g, next bool) {
4333 if randomizeScheduler && next && fastrand()%2 == 0 {
4334 next = false
4337 if next {
4338 retryNext:
4339 oldnext := _p_.runnext
4340 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
4341 goto retryNext
4343 if oldnext == 0 {
4344 return
4346 // Kick the old runnext out to the regular run queue.
4347 gp = oldnext.ptr()
4350 retry:
4351 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
4352 t := _p_.runqtail
4353 if t-h < uint32(len(_p_.runq)) {
4354 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
4355 atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
4356 return
4358 if runqputslow(_p_, gp, h, t) {
4359 return
4361 // the queue is not full, now the put above must succeed
4362 goto retry
4365 // Put g and a batch of work from local runnable queue on global queue.
4366 // Executed only by the owner P.
4367 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
4368 var batch [len(_p_.runq)/2 + 1]*g
4370 // First, grab a batch from local queue.
4371 n := t - h
4372 n = n / 2
4373 if n != uint32(len(_p_.runq)/2) {
4374 throw("runqputslow: queue is not full")
4376 for i := uint32(0); i < n; i++ {
4377 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
4379 if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
4380 return false
4382 batch[n] = gp
4384 if randomizeScheduler {
4385 for i := uint32(1); i <= n; i++ {
4386 j := fastrandn(i + 1)
4387 batch[i], batch[j] = batch[j], batch[i]
4391 // Link the goroutines.
4392 for i := uint32(0); i < n; i++ {
4393 batch[i].schedlink.set(batch[i+1])
4396 // Now put the batch on global queue.
4397 lock(&sched.lock)
4398 globrunqputbatch(batch[0], batch[n], int32(n+1))
4399 unlock(&sched.lock)
4400 return true
4403 // Get g from local runnable queue.
4404 // If inheritTime is true, gp should inherit the remaining time in the
4405 // current time slice. Otherwise, it should start a new time slice.
4406 // Executed only by the owner P.
4407 func runqget(_p_ *p) (gp *g, inheritTime bool) {
4408 // If there's a runnext, it's the next G to run.
4409 for {
4410 next := _p_.runnext
4411 if next == 0 {
4412 break
4414 if _p_.runnext.cas(next, 0) {
4415 return next.ptr(), true
4419 for {
4420 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
4421 t := _p_.runqtail
4422 if t == h {
4423 return nil, false
4425 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
4426 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
4427 return gp, false
4432 // Grabs a batch of goroutines from _p_'s runnable queue into batch.
4433 // Batch is a ring buffer starting at batchHead.
4434 // Returns number of grabbed goroutines.
4435 // Can be executed by any P.
4436 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
4437 for {
4438 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
4439 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer
4440 n := t - h
4441 n = n - n/2
4442 if n == 0 {
4443 if stealRunNextG {
4444 // Try to steal from _p_.runnext.
4445 if next := _p_.runnext; next != 0 {
4446 if _p_.status == _Prunning {
4447 // Sleep to ensure that _p_ isn't about to run the g
4448 // we are about to steal.
4449 // The important use case here is when the g running
4450 // on _p_ ready()s another g and then almost
4451 // immediately blocks. Instead of stealing runnext
4452 // in this window, back off to give _p_ a chance to
4453 // schedule runnext. This will avoid thrashing gs
4454 // between different Ps.
4455 // A sync chan send/recv takes ~50ns as of time of
4456 // writing, so 3us gives ~50x overshoot.
4457 if GOOS != "windows" {
4458 usleep(3)
4459 } else {
4460 // On windows system timer granularity is
4461 // 1-15ms, which is way too much for this
4462 // optimization. So just yield.
4463 osyield()
4466 if !_p_.runnext.cas(next, 0) {
4467 continue
4469 batch[batchHead%uint32(len(batch))] = next
4470 return 1
4473 return 0
4475 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
4476 continue
4478 for i := uint32(0); i < n; i++ {
4479 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
4480 batch[(batchHead+i)%uint32(len(batch))] = g
4482 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
4483 return n
4488 // Steal half of elements from local runnable queue of p2
4489 // and put onto local runnable queue of p.
4490 // Returns one of the stolen elements (or nil if failed).
4491 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
4492 t := _p_.runqtail
4493 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
4494 if n == 0 {
4495 return nil
4498 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
4499 if n == 0 {
4500 return gp
4502 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
4503 if t-h+n >= uint32(len(_p_.runq)) {
4504 throw("runqsteal: runq overflow")
4506 atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
4507 return gp
4510 //go:linkname setMaxThreads runtime_debug.setMaxThreads
4511 func setMaxThreads(in int) (out int) {
4512 lock(&sched.lock)
4513 out = int(sched.maxmcount)
4514 if in > 0x7fffffff { // MaxInt32
4515 sched.maxmcount = 0x7fffffff
4516 } else {
4517 sched.maxmcount = int32(in)
4519 checkmcount()
4520 unlock(&sched.lock)
4521 return
4524 //go:nosplit
4525 func procPin() int {
4526 _g_ := getg()
4527 mp := _g_.m
4529 mp.locks++
4530 return int(mp.p.ptr().id)
4533 //go:nosplit
4534 func procUnpin() {
4535 _g_ := getg()
4536 _g_.m.locks--
4539 //go:linkname sync_runtime_procPin sync.runtime_procPin
4540 //go:nosplit
4541 func sync_runtime_procPin() int {
4542 return procPin()
4545 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
4546 //go:nosplit
4547 func sync_runtime_procUnpin() {
4548 procUnpin()
4551 //go:linkname sync_atomic_runtime_procPin sync_atomic.runtime_procPin
4552 //go:nosplit
4553 func sync_atomic_runtime_procPin() int {
4554 return procPin()
4557 //go:linkname sync_atomic_runtime_procUnpin sync_atomic.runtime_procUnpin
4558 //go:nosplit
4559 func sync_atomic_runtime_procUnpin() {
4560 procUnpin()
4563 // Active spinning for sync.Mutex.
4564 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
4565 //go:nosplit
4566 func sync_runtime_canSpin(i int) bool {
4567 // sync.Mutex is cooperative, so we are conservative with spinning.
4568 // Spin only few times and only if running on a multicore machine and
4569 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
4570 // As opposed to runtime mutex we don't do passive spinning here,
4571 // because there can be work on global runq on on other Ps.
4572 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
4573 return false
4575 if p := getg().m.p.ptr(); !runqempty(p) {
4576 return false
4578 return true
4581 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
4582 //go:nosplit
4583 func sync_runtime_doSpin() {
4584 procyield(active_spin_cnt)
4587 var stealOrder randomOrder
4589 // randomOrder/randomEnum are helper types for randomized work stealing.
4590 // They allow to enumerate all Ps in different pseudo-random orders without repetitions.
4591 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
4592 // are coprime, then a sequences of (i + X) % GOMAXPROCS gives the required enumeration.
4593 type randomOrder struct {
4594 count uint32
4595 coprimes []uint32
4598 type randomEnum struct {
4599 i uint32
4600 count uint32
4601 pos uint32
4602 inc uint32
4605 func (ord *randomOrder) reset(count uint32) {
4606 ord.count = count
4607 ord.coprimes = ord.coprimes[:0]
4608 for i := uint32(1); i <= count; i++ {
4609 if gcd(i, count) == 1 {
4610 ord.coprimes = append(ord.coprimes, i)
4615 func (ord *randomOrder) start(i uint32) randomEnum {
4616 return randomEnum{
4617 count: ord.count,
4618 pos: i % ord.count,
4619 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
4623 func (enum *randomEnum) done() bool {
4624 return enum.i == enum.count
4627 func (enum *randomEnum) next() {
4628 enum.i++
4629 enum.pos = (enum.pos + enum.inc) % enum.count
4632 func (enum *randomEnum) position() uint32 {
4633 return enum.pos
4636 func gcd(a, b uint32) uint32 {
4637 for b != 0 {
4638 a, b = b, a%b
4640 return a