1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
8 "runtime/internal/atomic"
17 // Beyond indicating the general state of a G, the G status
18 // acts like a lock on the goroutine's stack (and hence its
19 // ability to execute user code).
21 // If you add to this list, add to the list
22 // of "okay during garbage collection" status
25 // _Gidle means this goroutine was just allocated and has not
26 // yet been initialized.
29 // _Grunnable means this goroutine is on a run queue. It is
30 // not currently executing user code. The stack is not owned.
33 // _Grunning means this goroutine may execute user code. The
34 // stack is owned by this goroutine. It is not on a run queue.
35 // It is assigned an M and a P.
38 // _Gsyscall means this goroutine is executing a system call.
39 // It is not executing user code. The stack is owned by this
40 // goroutine. It is not on a run queue. It is assigned an M.
43 // _Gwaiting means this goroutine is blocked in the runtime.
44 // It is not executing user code. It is not on a run queue,
45 // but should be recorded somewhere (e.g., a channel wait
46 // queue) so it can be ready()d when necessary. The stack is
47 // not owned *except* that a channel operation may read or
48 // write parts of the stack under the appropriate channel
49 // lock. Otherwise, it is not safe to access the stack after a
50 // goroutine enters _Gwaiting (e.g., it may get moved).
53 // _Gmoribund_unused is currently unused, but hardcoded in gdb
55 _Gmoribund_unused
// 5
57 // _Gdead means this goroutine is currently unused. It may be
58 // just exited, on a free list, or just being initialized. It
59 // is not executing user code. It may or may not have a stack
60 // allocated. The G and its stack (if any) are owned by the M
61 // that is exiting the G or that obtained the G from the free
65 // _Genqueue_unused is currently unused.
68 // _Gcopystack means this goroutine's stack is being moved. It
69 // is not executing user code and is not on a run queue. The
70 // stack is owned by the goroutine that put it in _Gcopystack.
73 // _Gscan combined with one of the above states other than
74 // _Grunning indicates that GC is scanning the stack. The
75 // goroutine is not executing user code and the stack is owned
76 // by the goroutine that set the _Gscan bit.
78 // _Gscanrunning is different: it is used to briefly block
79 // state transitions while GC signals the G to scan its own
80 // stack. This is otherwise like _Grunning.
82 // atomicstatus&~Gscan gives the state the goroutine will
83 // return to when the scan completes.
85 _Gscanrunnable
= _Gscan
+ _Grunnable
// 0x1001
86 _Gscanrunning
= _Gscan
+ _Grunning
// 0x1002
87 _Gscansyscall
= _Gscan
+ _Gsyscall
// 0x1003
88 _Gscanwaiting
= _Gscan
+ _Gwaiting
// 0x1004
94 _Prunning
// Only this P is allowed to change from _Prunning.
100 // Mutual exclusion locks. In the uncontended case,
101 // as fast as spin locks (just a few user-level instructions),
102 // but on the contention path they sleep in the kernel.
103 // A zeroed Mutex is unlocked (no need to initialize each lock).
105 // Futex-based impl treats it as uint32 key,
106 // while sema-based impl as M* waitm.
107 // Used to be a union, but unions break precise GC.
111 // sleep and wakeup on one-time events.
112 // before any calls to notesleep or notewakeup,
113 // must call noteclear to initialize the Note.
114 // then, exactly one thread can call notesleep
115 // and exactly one thread can call notewakeup (once).
116 // once notewakeup has been called, the notesleep
117 // will return. future notesleep will return immediately.
118 // subsequent noteclear must be called only after
119 // previous notesleep has returned, e.g. it's disallowed
120 // to call noteclear straight after notewakeup.
122 // notetsleep is like notesleep but wakes up after
123 // a given number of nanoseconds even if the event
124 // has not yet happened. if a goroutine uses notetsleep to
125 // wake up early, it must wait to call noteclear until it
126 // can be sure that no other goroutine is calling
129 // notesleep/notetsleep are generally called on g0,
130 // notetsleepg is similar to notetsleep but is called on user g.
132 // Futex-based impl treats it as uint32 key,
133 // while sema-based impl as M* waitm.
134 // Used to be a union, but unions break precise GC.
138 type funcval
struct {
140 // variable-size, fn-specific data here
143 // The representation of a non-empty interface.
144 // See comment in iface.go for more details on this struct.
150 // The representation of an empty interface.
151 // See comment in iface.go for more details on this struct.
157 func efaceOf(ep
*interface{}) *eface
{
158 return (*eface
)(unsafe
.Pointer(ep
))
161 // The guintptr, muintptr, and puintptr are all used to bypass write barriers.
162 // It is particularly important to avoid write barriers when the current P has
163 // been released, because the GC thinks the world is stopped, and an
164 // unexpected write barrier would not be synchronized with the GC,
165 // which can lead to a half-executed write barrier that has marked the object
166 // but not queued it. If the GC skips the object and completes before the
167 // queuing can occur, it will incorrectly free the object.
169 // We tried using special assignment functions invoked only when not
170 // holding a running P, but then some updates to a particular memory
171 // word went through write barriers and some did not. This breaks the
172 // write barrier shadow checking mode, and it is also scary: better to have
173 // a word that is completely ignored by the GC than to have one for which
174 // only a few updates are ignored.
176 // Gs, Ms, and Ps are always reachable via true pointers in the
177 // allgs, allm, and allp lists or (during allocation before they reach those lists)
178 // from stack variables.
180 // A guintptr holds a goroutine pointer, but typed as a uintptr
181 // to bypass write barriers. It is used in the Gobuf goroutine state
182 // and in scheduling lists that are manipulated without a P.
184 // The Gobuf.g goroutine pointer is almost always updated by assembly code.
185 // In one of the few places it is updated by Go code - func save - it must be
186 // treated as a uintptr to avoid a write barrier being emitted at a bad time.
187 // Instead of figuring out how to emit the write barriers missing in the
188 // assembly manipulation, we change the type of the field to uintptr,
189 // so that it does not require write barriers at all.
191 // Goroutine structs are published in the allg list and never freed.
192 // That will keep the goroutine structs from being collected.
193 // There is never a time that Gobuf.g's contain the only references
194 // to a goroutine: the publishing of the goroutine in allg comes first.
195 // Goroutine pointers are also kept in non-GC-visible places like TLS,
196 // so I can't see them ever moving. If we did want to start moving data
197 // in the GC, we'd need to allocate the goroutine structs from an
198 // alternate arena. Using guintptr doesn't make that problem any worse.
199 type guintptr
uintptr
202 func (gp guintptr
) ptr() *g
{ return (*g
)(unsafe
.Pointer(gp
)) }
205 func (gp
*guintptr
) set(g
*g
) { *gp
= guintptr(unsafe
.Pointer(g
)) }
208 func (gp
*guintptr
) cas(old
, new guintptr
) bool {
209 return atomic
.Casuintptr((*uintptr)(unsafe
.Pointer(gp
)), uintptr(old
), uintptr(new))
212 type puintptr
uintptr
215 func (pp puintptr
) ptr() *p
{ return (*p
)(unsafe
.Pointer(pp
)) }
218 func (pp
*puintptr
) set(p
*p
) { *pp
= puintptr(unsafe
.Pointer(p
)) }
220 type muintptr
uintptr
223 func (mp muintptr
) ptr() *m
{ return (*m
)(unsafe
.Pointer(mp
)) }
226 func (mp
*muintptr
) set(m
*m
) { *mp
= muintptr(unsafe
.Pointer(m
)) }
228 // sudog represents a g in a wait list, such as for sending/receiving
231 // sudog is necessary because the g ↔ synchronization object relation
232 // is many-to-many. A g can be on many wait lists, so there may be
233 // many sudogs for one g; and many gs may be waiting on the same
234 // synchronization object, so there may be many sudogs for one object.
236 // sudogs are allocated from a special pool. Use acquireSudog and
237 // releaseSudog to allocate and free them.
239 // The following fields are protected by the hchan.lock of the
240 // channel this sudog is blocking on. shrinkstack depends on
244 selectdone
*uint32 // CAS to 1 to win select race (may point to stack)
247 elem unsafe
.Pointer
// data element (may point to stack)
249 // The following fields are never accessed concurrently.
250 // waitlink is only accessed by g.
254 waitlink
*sudog
// g.waiting list
258 type gcstats
struct {
259 // the struct must consist of only uint64's,
260 // because it is casted to uint64[].
271 type libcall struct {
273 n uintptr // number of parameters
274 args uintptr // parameters
275 r1 uintptr // return values
277 err uintptr // error number
285 // describes how to handle callback
286 type wincallbackcontext struct {
287 gobody unsafe.Pointer // go function to call
288 argsize uintptr // callback arguments size (in bytes)
289 restorestack uintptr // adjust stack on return by (in bytes) (386 only)
297 // Stack describes a Go execution stack.
298 // The bounds of the stack are exactly [lo, hi),
299 // with no implicit data structures on either side.
305 // stkbar records the state of a G's stack barrier.
307 savedLRPtr uintptr // location overwritten by stack barrier PC
308 savedLRVal uintptr // value overwritten at savedLRPtr
314 // stack describes the actual stack memory: [stack.lo, stack.hi).
315 // stackguard0 is the stack pointer compared in the Go stack growth prologue.
316 // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
317 // stackguard1 is the stack pointer compared in the C stack growth prologue.
318 // It is stack.lo+StackGuard on g0 and gsignal stacks.
319 // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
320 // Not for gccgo: stack stack // offset known to runtime/cgo
321 // Not for gccgo: stackguard0 uintptr // offset known to liblink
322 // Not for gccgo: stackguard1 uintptr // offset known to liblink
324 _panic
*_panic
// innermost panic - offset known to liblink
325 _defer
*_defer
// innermost defer
326 m
*m
// current m; offset known to arm liblink
327 // Not for gccgo: stackAlloc uintptr // stack allocation is [stack.lo,stack.lo+stackAlloc)
328 // Not for gccgo: sched gobuf
329 syscallsp
uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
330 syscallpc
uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
331 // Not for gccgo: stkbar []stkbar // stack barriers, from low to high (see top of mstkbar.go)
332 // Not for gccgo: stkbarPos uintptr // index of lowest stack barrier not hit
333 // Not for gccgo: stktopsp uintptr // expected sp at top of stack, to check in traceback
334 param unsafe
.Pointer
// passed parameter on wakeup
336 // Not for gccgo: stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
338 waitsince
int64 // approx time when the g become blocked
339 waitreason
string // if status==Gwaiting
341 preempt
bool // preemption signal, duplicates stackguard0 = stackpreempt
342 paniconfault
bool // panic (instead of crash) on unexpected fault address
343 preemptscan
bool // preempted g does scan for gc
344 gcscandone
bool // g has scanned stack; protected by _Gscan bit in status
345 gcscanvalid
bool // false at start of gc cycle, true if G has not run since last scan; transition from true to false by calling queueRescan and false to true by calling dequeueRescan
346 throwsplit
bool // must not split stack
347 raceignore
int8 // ignore race detection events
348 sysblocktraced
bool // StartTrace has emitted EvGoInSyscall about this goroutine
349 sysexitticks
int64 // cputicks when syscall has returned (for tracing)
350 traceseq
uint64 // trace event sequencer
351 tracelastp puintptr
// last P emitted an event for this goroutine
358 gopc
uintptr // pc of go statement that created this goroutine
359 startpc
uintptr // pc of goroutine function
360 // Not for gccgo: racectx uintptr
361 waiting
*sudog
// sudog structures this g is waiting on (that have a valid elem ptr); in lock order
362 // Not for gccgo: cgoCtxt []uintptr // cgo traceback context
366 // gcRescan is this G's index in work.rescan.list. If this is
367 // -1, this G is not on the rescan list.
369 // If gcphase != _GCoff and this G is visible to the garbage
370 // collector, writes to this are protected by work.rescan.lock.
373 // gcAssistBytes is this G's GC assist credit in terms of
374 // bytes allocated. If this is positive, then the G has credit
375 // to allocate gcAssistBytes bytes without assisting. If this
376 // is negative, then the G must correct this by performing
377 // scan work. We track this in bytes to make it fast to update
378 // and check for debt in the malloc hot path. The assist ratio
379 // determines how this corresponds to scan work debt.
382 // Remaining fields are specific to gccgo.
384 exception unsafe
.Pointer
// current exception being thrown
385 isforeign
bool // whether current exception is not from Go
387 // Fields that hold stack and context information if status is Gsyscall
388 gcstack unsafe
.Pointer
390 gcnextsegment unsafe
.Pointer
391 gcnextsp unsafe
.Pointer
392 gcinitialsp unsafe
.Pointer
395 entry unsafe
.Pointer
// goroutine entry point
396 fromgogo
bool // whether entered from gogo function
398 issystem
bool // do not output in stack dump
399 isbackground
bool // ignore in deadlock detector
401 traceback
*tracebackg
// stack traceback buffer
403 context g_ucontext_t
// saved context for setcontext
404 stackcontext
[10]unsafe
.Pointer
// split-stack context
408 g0
*g
// goroutine with scheduling stack
409 // Not for gccgo: morebuf gobuf // gobuf arg to morestack
410 // Not for gccgo: divmod uint32 // div/mod denominator for arm - known to liblink
412 // Fields not known to debuggers.
413 procid
uint64 // for debuggers, but offset not hard-coded
414 gsignal
*g
// signal-handling g
415 sigmask sigset
// storage for saved signal mask
416 // Not for gccgo: tls [6]uintptr // thread-local storage (for x86 extern register)
418 curg
*g
// current running goroutine
419 caughtsig guintptr
// goroutine running during fatal signal
420 p puintptr
// attached p for executing go code (nil if not executing go code)
425 preemptoff
string // if != "", keep curg running on this m
431 spinning
bool // m is out of work and is actively looking for work
432 blocked
bool // m is blocked on a note
433 inwb
bool // m is executing a write barrier
434 newSigstack
bool // minit on C thread called sigaltstack
437 ncgocall
uint64 // number of cgo calls in total
438 ncgo
int32 // number of cgo calls currently in progress
439 // Not for gccgo: cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
440 // Not for gccgo: cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
442 alllink
*m
// on allm
446 createstack
[32]location
// stack that created this thread.
447 // Not for gccgo: freglo [16]uint32 // d[i] lsb and f[i]
448 // Not for gccgo: freghi [16]uint32 // d[i] msb and f[i+16]
449 // Not for gccgo: fflag uint32 // floating point compare flags
450 locked
uint32 // tracking for lockosthread
451 nextwaitm
uintptr // next m waiting for lock
455 waitunlockf unsafe
.Pointer
// todo go func(*g, unsafe.pointer) bool
456 waitlock unsafe
.Pointer
461 // Not for gccgo: thread uintptr // thread handle
463 // these are here because they are too large to be on the stack
464 // of low-level NOSPLIT functions.
465 // Not for gccgo: libcall libcall
466 // Not for gccgo: libcallpc uintptr // for cpu profiler
467 // Not for gccgo: libcallsp uintptr
468 // Not for gccgo: libcallg guintptr
469 // Not for gccgo: syscall libcall // stores syscall parameters on windows
473 // Remaining fields are specific to gccgo.
475 gsignalstack unsafe
.Pointer
// stack for gsignal
476 gsignalstacksize
uintptr
478 dropextram
bool // drop after call is done
487 status
uint32 // one of pidle/prunning/...
489 schedtick
uint32 // incremented on every scheduler call
490 syscalltick
uint32 // incremented on every system call
491 m muintptr
// back-link to associated m (nil if idle)
493 // Not for gccgo: racectx uintptr
495 // gccgo has only one size of defer.
497 deferpoolbuf
[32]*_defer
499 // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
503 // Queue of runnable goroutines. Accessed without lock.
507 // runnext, if non-nil, is a runnable G that was ready'd by
508 // the current G and should be run next instead of what's in
509 // runq if there's time remaining in the running G's time
510 // slice. It will inherit the time left in the current time
511 // slice. If a set of goroutines is locked in a
512 // communicate-and-wait pattern, this schedules that set as a
513 // unit and eliminates the (potentially large) scheduling
514 // latency that otherwise arises from adding the ready'd
515 // goroutines to the end of the run queue.
518 // Available G's (status == Gdead)
527 // Not for gccgo for now: palloc persistentAlloc // per-P to avoid mutex
530 gcAssistTime
int64 // Nanoseconds in assistAlloc
531 gcBgMarkWorker guintptr
532 gcMarkWorkerMode gcMarkWorkerMode
534 // gcw is this P's GC work buffer cache. The work buffer is
535 // filled by write barriers, drained by mutator assists, and
536 // disposed on certain GC state transitions.
537 // Not for gccgo for now: gcw gcWork
539 runSafePointFn
uint32 // if 1, run sched.safePointFn at next safe point
545 // The max value of GOMAXPROCS.
546 // There are no fundamental restrictions on the value.
547 _MaxGomaxprocs
= 1 << 8
551 // accessed atomically. keep at top to ensure alignment on 32-bit systems.
557 midle muintptr
// idle m's waiting for work
558 nmidle
int32 // number of idle m's waiting for work
559 nmidlelocked
int32 // number of locked m's waiting for work
560 mcount
int32 // number of m's that have been created
561 maxmcount
int32 // maximum number of m's allowed (or die)
563 ngsys
uint32 // number of system goroutines; updated atomically
565 pidle puintptr
// idle p's
567 nmspinning
uint32 // See "Worker thread parking/unparking" comment in proc.go.
569 // Global runnable queue.
574 // Global cache of dead G's.
579 // Central cache of sudog structs.
583 // Central pool of available defer structs.
587 gcwaiting
uint32 // gc is waiting to run
593 // safepointFn should be called on each P at the next GC
594 // safepoint if p.runSafePointFn is set.
599 profilehz
int32 // cpu profiling rate
601 procresizetime
int64 // nanotime() of last change to gomaxprocs
602 totaltime
int64 // ∫gomaxprocs dt up to procresizetime
605 // The m.locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
606 // The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
607 // External locks are not recursive; a second lock is silently ignored.
608 // The upper bits of m.locked record the nesting depth of calls to lockOSThread
609 // (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
610 // Internal locks can be recursive. For instance, a lock for cgo can occur while the main
611 // goroutine is holding the lock during the initialization phase.
618 _SigNotify
= 1 << iota // let signal.Notify have signal, even if from kernel
619 _SigKill
// if signal.Notify doesn't take it, exit quietly
620 _SigThrow
// if signal.Notify doesn't take it, exit loudly
621 _SigPanic
// if the signal is from the kernel, panic
622 _SigDefault
// if the signal isn't explicitly requested, don't monitor it
623 _SigHandling
// our signal handler is registered
624 _SigGoExit
// cause all runtime procs to exit (only used on Plan 9).
625 _SigSetStack
// add SA_ONSTACK to libc handler
626 _SigUnblock
// unblocked in minit
630 // gccgo does not use this.
632 // Layout of in-memory per-function information prepared by linker
633 // See https://golang.org/s/go12symtab.
634 // Keep in sync with linker
635 // and with package debug/gosym and with symtab.go in package runtime.
637 entry uintptr // start pc
638 nameoff int32 // function name
640 args int32 // in/out args size
641 _ int32 // previously legacy frame size; kept for layout compatibility
652 // Lock-free stack node.
653 // Also known to export_test.go.
659 type forcegcstate
struct {
// startupRandomData holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
// NOTE(review): may be empty on platforms without that vector — callers
// presumably tolerate a nil slice; confirm against the initializers.
var startupRandomData []byte
669 // extendRandom extends the random numbers in r[:n] to the whole slice r.
670 // Treats n<0 as n==0.
671 func extendRandom(r
[]byte, n
int) {
676 // Extend random bits using hash function & time seed
681 h
:= memhash(unsafe
.Pointer(&r
[n
-w
]), uintptr(nanotime()), uintptr(w
))
682 for i
:= 0; i
< sys
.PtrSize
&& n
< len(r
); i
++ {
690 // deferred subroutine calls
691 // This is the gccgo version.
693 // The next entry in the stack.
696 // The stack variable for the function which called this defer
697 // statement. This is set to true if we are returning from
698 // that function, false if we are panicing through it.
701 // The value of the panic stack when this function is
702 // deferred. This function can not recover this value from
703 // the panic stack. This can happen if a deferred function
704 // has a defer statement itself.
707 // The function to call.
710 // The argument to pass to the function.
713 // The return address that a recover thunk matches against.
714 // This is set by __go_set_defer_retaddr which is called by
715 // the thunks created by defer statements.
718 // Set to true if a function created by reflect.MakeFunc is
719 // permitted to recover. The return address of such a
720 // function will be somewhere in libffi, so __retaddr
722 makefunccanrecover
bool
724 // Set to true if this defer stack entry is not part of the
730 // This is the gccgo version.
732 // The next entry in the stack.
735 // The value associated with this panic.
738 // Whether this panic has been recovered.
741 // Whether this panic was pushed on the stack because of an
742 // exception thrown in some other language.
747 _TraceRuntimeFrames
= 1 << iota // include frames for internal runtime functions.
748 _TraceTrap
// the initial PC, SP are from a trap, not a return PC from a call
749 _TraceJumpStack
// if traceback is on a systemstack, resume trace at g that called into it
// _TracebackMaxFrames is the maximum number of frames we print for a traceback.
const _TracebackMaxFrames = 100
756 // emptystring string
760 allp
[_MaxGomaxprocs
+ 1]*p
768 // Information about what cpu features are available.
775 // lfenceBeforeRdtsc bool
779 // goarm uint8 // set by cmd/link on arm systems
780 // framepointer_enabled bool // set by cmd/link
783 // Set by the linker so the runtime can determine the buildmode.
785 islibrary
bool // -buildmode=c-shared
786 isarchive
bool // -buildmode=c-archive
// Types that are only used by gccgo.

// g_ucontext_t is a Go version of the C ucontext_t type, used by getcontext.
// _sizeof_ucontext_t is defined by mkrsysinfo.sh from <ucontext.h>.
// On some systems getcontext and friends require a value that is
// aligned to a 16-byte boundary. We implement this by increasing the
// required size (the +15 below) and, presumably, picking a suitably
// aligned offset within the array when it is used — confirm at the use
// sites. The array is sized in pointer-sized words, rounding up.
type g_ucontext_t [(_sizeof_ucontext_t + 15) / unsafe.Sizeof(unsafe.Pointer(nil))]unsafe.Pointer
// sigset is the Go version of the C type sigset_t.
// _sigset_t is defined by the Makefile from <signal.h>.
// It is the storage type of m.sigmask (the saved signal mask).
type sigset _sigset_t