// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// defined constants
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M and a P.
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~_Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan         = 0x1000
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001
	_Gscanrunning  = _Gscan + _Grunning  // 0x1002
	_Gscansyscall  = _Gscan + _Gsyscall  // 0x1003
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004
)
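
// For illustration: clearing the _Gscan bit recovers the state the
// goroutine will return to when the scan completes, as described above:
//
//	state := atomicstatus &^ _Gscan // e.g. _Gscanrunnable (0x1001) -> _Grunnable (1)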

const (
	// P status
	_Pidle = iota
	_Prunning // Only this P is allowed to change from _Prunning.
	_Psyscall
	_Pgcstop
	_Pdead
)

// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
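
// For illustration only: a minimal sketch of how a mutex is used inside the
// runtime (lock and unlock are the runtime-internal functions defined in
// lock_futex.go or lock_sema.go; the variable names here are hypothetical):
//
//	var countLock mutex // the zero value is an unlocked mutex
//	var protected int
//
//	func bump() {
//		lock(&countLock)
//		protected++
//		unlock(&countLock)
//	}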

// sleep and wakeup on one-time events.
// before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// once notewakeup has been called, the notesleep
// will return. future notesleep will return immediately.
// subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened. if a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling
// notewakeup.
//
// notesleep/notetsleep are generally called on g0,
// notetsleepg is similar to notetsleep but is called on user g.
type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
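
// For illustration only: a sketch of the one-time-event protocol described
// above (noteclear, notesleep, and notewakeup are the runtime-internal
// functions; the note variable here is hypothetical):
//
//	var done note
//	noteclear(&done) // must initialize before any sleep/wakeup
//
//	// exactly one thread parks (generally on g0):
//	notesleep(&done) // blocks until the wakeup below
//
//	// exactly one other thread signals, once:
//	notewakeup(&done) // releases the sleeper; later notesleeps return immediately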

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

// The representation of a non-empty interface.
// See comment in iface.go for more details on this struct.
type iface struct {
	tab  unsafe.Pointer
	data unsafe.Pointer
}

// The representation of an empty interface.
// See comment in iface.go for more details on this struct.
type eface struct {
	_type *_type
	data  unsafe.Pointer
}

func efaceOf(ep *interface{}) *eface {
	return (*eface)(unsafe.Pointer(ep))
}

// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.
//
// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}
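
// For illustration only: a sketch of linking Gs through guintptr fields so
// the stores are plain uintptr writes with no write barriers (the list head
// below is hypothetical, not a real runtime variable):
//
//	var freeGList guintptr // hypothetical list head
//
//	func pushFreeG(gp *g) {
//		gp.schedlink = freeGList // guintptr-to-guintptr copy, no barrier
//		freeGList.set(gp)        // plain store of the pointer bits
//	}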

// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}

type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constraints on
// muintptrs:
//
// 1. Never hold an muintptr locally across a safe point.
//
// 2. Any muintptr in the heap must be owned by the M itself so it can
//    ensure it is not in use when the last true *m is released.
type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}

// sudog represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *g

	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	isSelect bool
	next     *sudog
	prev     *sudog
	elem     unsafe.Pointer // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32
	parent      *sudog // semaRoot binary tree
	waitlink    *sudog // g.waiting list or semaRoot
	waittail    *sudog // semaRoot
	c           *hchan // channel
}
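
// For illustration only: the many-to-many relation above in concrete terms.
// A single goroutine blocked in
//
//	select {
//	case <-ch1:
//	case <-ch2:
//	}
//
// owns two sudogs, one on each channel's wait queue, linked through
// g.waiting; conversely, many goroutines receiving from one channel yield
// many sudogs queued on that single hchan.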

/*
Not used by gccgo.

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}
*/

/*
Not used by gccgo.

// describes how to handle callback
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}
*/

/*
Not used by gccgo.

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}
*/

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	// Not for gccgo: stack stack // offset known to runtime/cgo
	// Not for gccgo: stackguard0 uintptr // offset known to liblink
	// Not for gccgo: stackguard1 uintptr // offset known to liblink

	_panic *_panic // innermost panic - offset known to liblink
	_defer *_defer // innermost defer
	m      *m      // current m; offset known to arm liblink
	// Not for gccgo: sched gobuf
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	// Not for gccgo: stktopsp uintptr // expected sp at top of stack, to check in traceback
	param        unsafe.Pointer // passed parameter on wakeup
	atomicstatus uint32
	// Not for gccgo: stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid           int64
	waitsince      int64  // approx time when the g became blocked
	waitreason     string // if status==Gwaiting
	schedlink      guintptr
	preempt        bool // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault   bool // panic (instead of crash) on unexpected fault address
	preemptscan    bool // preempted g does scan for gc
	gcscandone     bool // g has scanned stack; protected by _Gscan bit in status
	gcscanvalid    bool // false at start of gc cycle, true if G has not run since last scan; TODO: remove?
	throwsplit     bool // must not split stack
	raceignore     int8 // ignore race detection events
	sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64    // cputicks when syscall has returned (for tracing)
	traceseq       uint64   // trace event sequencer
	tracelastp     puintptr // last P emitted an event for this goroutine
	lockedm        muintptr
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr // pc of go statement that created this goroutine
	startpc        uintptr // pc of goroutine function
	// Not for gccgo: racectx uintptr
	waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	// Not for gccgo: cgoCtxt []uintptr // cgo traceback context
	labels     unsafe.Pointer // profiler labels
	timer      *timer         // cached timer for time.Sleep
	selectDone uint32         // are we participating in a select and did someone win the race?

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64
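
	// For illustration (the ratio is made up): with an assist ratio of
	// 0.5 units of scan work per allocated byte, gcAssistBytes == -4096
	// means this G owes 4096 bytes of allocation debt and must perform
	// 4096 * 0.5 = 2048 units of scan work (or steal equivalent credit)
	// before allocating further.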

	// Remaining fields are specific to gccgo.

	exception unsafe.Pointer // current exception being thrown
	isforeign bool           // whether current exception is not from Go

	// When using split-stacks, these fields hold the results of
	// __splitstack_find while executing a syscall. These are used
	// by the garbage collector to scan the goroutine's stack.
	//
	// When not using split-stacks, g0 stacks are allocated by the
	// libc and other goroutine stacks are allocated by malg.
	// gcstack: unused (sometimes cleared)
	// gcstacksize: g0: 0; others: size of stack
	// gcnextsegment: unused
	// gcnextsp: current SP while executing a syscall
	// gcinitialsp: g0: top of stack; others: start of stack memory
	// gcnextsp2: current secondary stack pointer (if present)
	// gcinitialsp2: start of secondary stack (if present)
	gcstack       uintptr
	gcstacksize   uintptr
	gcnextsegment uintptr
	gcnextsp      uintptr
	gcinitialsp   unsafe.Pointer
	gcnextsp2     uintptr
	gcinitialsp2  unsafe.Pointer

	// gcregs holds the register values while executing a syscall.
	// This is set by getcontext and scanned by the garbage collector.
	gcregs g_ucontext_t

	entry    func(unsafe.Pointer) // goroutine function to run
	entryfn  uintptr              // function address passed to __go_go
	fromgogo bool                 // whether entered from gogo function

	scanningself bool // whether goroutine is scanning its own stack

	isSystemGoroutine bool // whether goroutine is a "system" goroutine

	traceback *tracebackg // stack traceback buffer

	context      g_ucontext_t // saved context for setcontext
	stackcontext [10]uintptr  // split-stack context
}

type m struct {
	g0 *g // goroutine with scheduling stack
	// Not for gccgo: morebuf gobuf // gobuf arg to morestack
	// Not for gccgo: divmod uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid  uint64 // for debuggers, but offset not hard-coded
	gsignal *g     // signal-handling g
	// Not for gccgo: goSigStack gsignalStack // Go-allocated signal handling stack
	sigmask sigset // storage for saved signal mask
	// Not for gccgo: tls [6]uintptr // thread-local storage (for x86 extern register)
	mstartfn    func()
	curg        *g       // current running goroutine
	caughtsig   guintptr // goroutine running during fatal signal
	p           puintptr // attached p for executing go code (nil if not executing go code)
	nextp       puintptr
	id          int64
	mallocing   int32
	throwing    int32
	preemptoff  string // if != "", keep curg running on this m
	locks       int32
	softfloat   int32
	dying       int32
	profilehz   int32
	helpgc      int32
	spinning    bool // m is out of work and is actively looking for work
	blocked     bool // m is blocked on a note
	inwb        bool // m is executing a write barrier
	newSigstack bool // minit on C thread called sigaltstack
	printlock   int8
	incgo       bool   // m is executing a cgo call
	freeWait    uint32 // if == 0, safe to free g0 and delete m (atomic)
	fastrand    [2]uint32
	needextram  bool
	traceback   uint8
	ncgocall    uint64 // number of cgo calls in total
	ncgo        int32  // number of cgo calls currently in progress
	// Not for gccgo: cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
	// Not for gccgo: cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
	park        note
	alllink     *m // on allm
	schedlink   muintptr
	mcache      *mcache
	lockedg     guintptr
	createstack [32]location // stack that created this thread.
	// Not for gccgo: freglo [16]uint32 // d[i] lsb and f[i]
	// Not for gccgo: freghi [16]uint32 // d[i] msb and f[i+16]
	// Not for gccgo: fflag uint32 // floating point compare flags
	lockedExt     uint32         // tracking for external LockOSThread
	lockedInt     uint32         // tracking for internal lockOSThread
	nextwaitm     muintptr       // next m waiting for lock
	waitunlockf   unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	// Not for gccgo: thread uintptr // thread handle
	freelink *m // on sched.freem

	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	// Not for gccgo: libcall libcall
	// Not for gccgo: libcallpc uintptr // for cpu profiler
	// Not for gccgo: libcallsp uintptr
	// Not for gccgo: libcallg guintptr
	// Not for gccgo: syscall libcall // stores syscall parameters on windows

	mos mOS

	// Remaining fields are specific to gccgo.

	gsignalstack     unsafe.Pointer // stack for gsignal
	gsignalstacksize uintptr

	dropextram bool // drop after call is done
	exiting    bool // thread is exiting

	gcing int32
}

type p struct {
	lock mutex

	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	racectx     uintptr

	// gccgo has only one size of defer.
	deferpool    []*_defer
	deferpoolbuf [32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	runnext guintptr
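
	// For illustration only: once primed with an initial value, a
	// communicate-and-wait pair such as
	//
	//	go func() { for { c1 <- <-c2 } }() // goroutine A
	//	go func() { for { c2 <- <-c1 } }() // goroutine B
	//
	// benefits from runnext: each send readies the other goroutine, and
	// running it next with the remaining time slice avoids queuing it
	// behind every other runnable G in runq.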

	// Available G's (status == Gdead)
	gfree    *g
	gfreecnt int32

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	tracebuf traceBufPtr

	// traceSweep indicates the sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	traceSweep bool
	// traceSwept and traceReclaimed track the number of bytes
	// swept and reclaimed by sweeping in the current sweep loop.
	traceSwept, traceReclaimed uintptr

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime         int64 // Nanoseconds in assistAlloc
	gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker
	gcBgMarkWorker       guintptr
	gcMarkWorkerMode     gcMarkWorkerMode

	// gcMarkWorkerStartTime is the nanotime() at which this mark
	// worker started.
	gcMarkWorkerStartTime int64

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.
	wbBuf wbBuf

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	pad [sys.CacheLineSize]byte
}

type schedt struct {
	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
	goidgen  uint64
	lastpoll uint64

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mnext        int64    // number of m's that have been created and next M ID
	maxmcount    int32    // maximum number of m's allowed (or die)
	nmsys        int32    // number of system m's not counted for deadlock
	nmfreed      int64    // cumulative number of freed m's

	ngsys uint32 // number of system goroutines; updated atomically

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.

	// Global runnable queue.
	runqhead guintptr
	runqtail guintptr
	runqsize int32

	// Global cache of dead G's.
	gflock mutex
	gfree  *g
	ngfree int32

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs.
	deferlock mutex
	deferpool *_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note

	// safepointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}

// Values for the flags field of a sigTabT.
const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // add SA_ONSTACK to libc handler
	_SigUnblock              // always unblock; see blockableSig
	_SigIgn                  // _SIG_DFL action is to ignore the signal
)

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}

// startupRandomData holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte

// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	for n < len(r) {
		// Extend random bits using hash function & time seed
		w := n
		if w > 16 {
			w = 16
		}
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		for i := 0; i < sys.PtrSize && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}
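
// For illustration only: a hypothetical call that stretches 8 bytes of seed
// material across a 32-byte buffer (not an actual runtime call site):
//
//	var buf [32]byte
//	copy(buf[:], seed[:])   // seed: 8 random bytes from the platform (hypothetical)
//	extendRandom(buf[:], 8) // deterministically fills buf[8:] from buf[:8] and nanotime()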

// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in freedefer.
type _defer struct {
	// The next entry in the stack.
	link *_defer

	// The stack variable for the function which called this defer
	// statement. This is set to true if we are returning from
	// that function, false if we are panicking through it.
	frame *bool

	// The value of the panic stack when this function is
	// deferred. This function can not recover this value from
	// the panic stack. This can happen if a deferred function
	// has a defer statement itself.
	panicStack *_panic

	// The panic that caused the defer to run. This is used to
	// discard panics that have already been handled.
	_panic *_panic

	// The function to call.
	pfn uintptr

	// The argument to pass to the function.
	arg unsafe.Pointer

	// The return address that a recover thunk matches against.
	// This is set by __go_set_defer_retaddr which is called by
	// the thunks created by defer statements.
	retaddr uintptr

	// Set to true if a function created by reflect.MakeFunc is
	// permitted to recover. The return address of such a
	// function will be somewhere in libffi, so __retaddr
	// is not useful.
	makefunccanrecover bool
}

// A _panic holds an entry on the list of active panics.
// This is the gccgo version.
type _panic struct {
	// The next entry in the stack.
	link *_panic

	// The value associated with this panic.
	arg interface{}

	// Whether this panic has been recovered.
	recovered bool

	// Whether this panic was pushed on the stack because of an
	// exception thrown in some other language.
	isforeign bool

	// Whether this panic was already seen by a deferred function
	// which called panic again.
	aborted bool
}

const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

// The maximum number of frames we print for a traceback.
const _TracebackMaxFrames = 100

var (
	allglen    uintptr
	allm       *m
	allp       []*p  // len(allp) == gomaxprocs; may change at safe points, otherwise immutable
	allpLock   mutex // Protects P-less reads of allp and all writes
	gomaxprocs int32
	ncpu       int32
	forcegc    forcegcstate
	sched      schedt
	newprocs   int32

	// Information about what cpu features are available.
	// Set on startup in asm_{x86,amd64}.s.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	cpuid_ecx   uint32
	support_aes bool

	// cpuid_edx         uint32
	// cpuid_ebx7        uint32
	// lfenceBeforeRdtsc bool
	// support_avx       bool
	// support_avx2      bool
	// support_bmi1      bool
	// support_bmi2      bool

	// goarm                uint8 // set by cmd/link on arm systems
	// framepointer_enabled bool  // set by cmd/link
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)

// Types that are only used by gccgo.

// g_ucontext_t is a Go version of the C ucontext_t type, used by getcontext.
// _sizeof_ucontext_t is defined by mkrsysinfo.sh from <ucontext.h>.
// On some systems getcontext and friends require a value that is
// aligned to a 16-byte boundary. We implement this by increasing the
// required size and picking an appropriate offset when we use the
// array.
type g_ucontext_t [(_sizeof_ucontext_t + 15) / unsafe.Sizeof(uintptr(0))]uintptr
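
// For illustration only: a sketch of how a 16-byte-aligned pointer can be
// carved out of the oversized array above (the runtime's actual helper may
// differ in name and details):
//
//	func alignedUcontext(c *g_ucontext_t) unsafe.Pointer {
//		p := uintptr(unsafe.Pointer(c))
//		return unsafe.Pointer((p + 15) &^ 15) // round up to a 16-byte boundary
//	}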

// sigset is the Go version of the C type sigset_t.
// _sigset_t is defined by the Makefile from <signal.h>.
type sigset _sigset_t

// getMemstats returns a pointer to the internal memstats variable,
// for C code.
//go:linkname getMemstats runtime.getMemstats
func getMemstats() *mstats {
	return &memstats
}