* README.Portability: Remove note on an Irix compatibility issue.
[official-gcc.git] / libgo / go / runtime / runtime2.go
blob195d65bbd73d45f8b639c1ce37cdd3feae6ce56e
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime
7 import (
8 "runtime/internal/atomic"
9 "runtime/internal/sys"
10 "unsafe"
// defined constants
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M and a P.
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan         = 0x1000
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001
	_Gscanrunning  = _Gscan + _Grunning  // 0x1002
	_Gscansyscall  = _Gscan + _Gsyscall  // 0x1003
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004
)
const (
	// P status
	_Pidle = iota
	_Prunning // Only this P is allowed to change from _Prunning.
	_Psyscall
	_Pgcstop
	_Pdead
)
// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
// sleep and wakeup on one-time events.
// before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// once notewakeup has been called, the notesleep
// will return. future notesleep will return immediately.
// subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened. if a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling
// notewakeup.
//
// notesleep/notetsleep are generally called on g0,
// notetsleepg is similar to notetsleep but is called on user g.
type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
// funcval is the runtime layout of a function value: the code pointer
// followed by any function-specific (closure) data.
type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}
// The representation of a non-empty interface.
// See comment in iface.go for more details on this struct.
type iface struct {
	tab  unsafe.Pointer
	data unsafe.Pointer
}
// The representation of an empty interface.
// See comment in iface.go for more details on this struct.
type eface struct {
	_type *_type
	data  unsafe.Pointer
}
157 func efaceOf(ep *interface{}) *eface {
158 return (*eface)(unsafe.Pointer(ep))
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs, Ms, and Ps are always reachable via true pointers in the
// allgs, allm, and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr
201 //go:nosplit
202 func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
204 //go:nosplit
205 func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
// cas atomically compare-and-swaps *gp from old to new, reporting
// whether the swap took place.
//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}
// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}
// A puintptr is a *p typed as uintptr, to bypass write barriers
// (see the guintptr commentary above).
type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
// A muintptr is a *m typed as uintptr, to bypass write barriers
// (see the guintptr commentary above).
type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}
// sudog represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this.

	g          *g
	selectdone *uint32 // CAS to 1 to win select race (may point to stack)
	next       *sudog
	prev       *sudog
	elem       unsafe.Pointer // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// waitlink is only accessed by g.

	acquiretime int64
	releasetime int64
	ticket      uint32
	waitlink    *sudog // g.waiting list
	c           *hchan // channel
}
// gcstats accumulates per-M garbage collection counters.
type gcstats struct {
	// the struct must consist of only uint64's,
	// because it is casted to uint64[].
	nhandoff    uint64
	nhandoffcnt uint64
	nprocyield  uint64
	nosyield    uint64
	nsleep      uint64
}
/*
Not used by gccgo.

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}
*/
/*
Not used by gccgo.

// describes how to handle callback
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}
*/
/*
Not used by gccgo.

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}

// stkbar records the state of a G's stack barrier.
type stkbar struct {
	savedLRPtr uintptr // location overwritten by stack barrier PC
	savedLRVal uintptr // value overwritten at savedLRPtr
}
*/
// g holds all per-goroutine state (this is the gccgo variant; fields
// marked "Not for gccgo" exist only in the gc toolchain's runtime).
type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	// Not for gccgo: stack stack // offset known to runtime/cgo
	// Not for gccgo: stackguard0 uintptr // offset known to liblink
	// Not for gccgo: stackguard1 uintptr // offset known to liblink

	_panic *_panic // innermost panic - offset known to liblink
	_defer *_defer // innermost defer
	m      *m      // current m; offset known to arm liblink
	// Not for gccgo: stackAlloc uintptr // stack allocation is [stack.lo,stack.lo+stackAlloc)
	// Not for gccgo: sched gobuf
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	// Not for gccgo: stkbar []stkbar // stack barriers, from low to high (see top of mstkbar.go)
	// Not for gccgo: stkbarPos uintptr // index of lowest stack barrier not hit
	// Not for gccgo: stktopsp uintptr // expected sp at top of stack, to check in traceback
	param        unsafe.Pointer // passed parameter on wakeup
	atomicstatus uint32         // one of the _G* status values; read/written atomically
	// Not for gccgo: stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid           int64
	waitsince      int64    // approx time when the g become blocked
	waitreason     string   // if status==Gwaiting
	schedlink      guintptr
	preempt        bool     // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault   bool     // panic (instead of crash) on unexpected fault address
	preemptscan    bool     // preempted g does scan for gc
	gcscandone     bool     // g has scanned stack; protected by _Gscan bit in status
	gcscanvalid    bool     // false at start of gc cycle, true if G has not run since last scan; transition from true to false by calling queueRescan and false to true by calling dequeueRescan
	throwsplit     bool     // must not split stack
	raceignore     int8     // ignore race detection events
	sysblocktraced bool     // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64    // cputicks when syscall has returned (for tracing)
	traceseq       uint64   // trace event sequencer
	tracelastp     puintptr // last P emitted an event for this goroutine
	lockedm        *m
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr // pc of go statement that created this goroutine
	startpc        uintptr // pc of goroutine function
	// Not for gccgo: racectx uintptr
	waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	// Not for gccgo: cgoCtxt []uintptr // cgo traceback context

	// Per-G GC state

	// gcRescan is this G's index in work.rescan.list. If this is
	// -1, this G is not on the rescan list.
	//
	// If gcphase != _GCoff and this G is visible to the garbage
	// collector, writes to this are protected by work.rescan.lock.
	gcRescan int32

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64

	// Remaining fields are specific to gccgo.

	exception unsafe.Pointer // current exception being thrown
	isforeign bool           // whether current exception is not from Go

	// Fields that hold stack and context information if status is Gsyscall
	gcstack       unsafe.Pointer
	gcstacksize   uintptr
	gcnextsegment unsafe.Pointer
	gcnextsp      unsafe.Pointer
	gcinitialsp   unsafe.Pointer
	gcregs        g_ucontext_t

	entry    unsafe.Pointer // goroutine entry point
	fromgogo bool           // whether entered from gogo function

	issystem     bool // do not output in stack dump
	isbackground bool // ignore in deadlock detector

	traceback *tracebackg // stack traceback buffer

	context      g_ucontext_t       // saved context for setcontext
	stackcontext [10]unsafe.Pointer // split-stack context
}
// m represents an OS thread (machine) known to the scheduler
// (gccgo variant; "Not for gccgo" fields exist only in the gc runtime).
type m struct {
	g0 *g // goroutine with scheduling stack
	// Not for gccgo: morebuf gobuf // gobuf arg to morestack
	// Not for gccgo: divmod uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid  uint64 // for debuggers, but offset not hard-coded
	gsignal *g     // signal-handling g
	sigmask sigset // storage for saved signal mask
	// Not for gccgo: tls [6]uintptr // thread-local storage (for x86 extern register)
	mstartfn    uintptr
	curg        *g       // current running goroutine
	caughtsig   guintptr // goroutine running during fatal signal
	p           puintptr // attached p for executing go code (nil if not executing go code)
	nextp       puintptr
	id          int32
	mallocing   int32
	throwing    int32
	preemptoff  string // if != "", keep curg running on this m
	locks       int32
	softfloat   int32
	dying       int32
	profilehz   int32
	helpgc      int32
	spinning    bool // m is out of work and is actively looking for work
	blocked     bool // m is blocked on a note
	inwb        bool // m is executing a write barrier
	newSigstack bool // minit on C thread called sigaltstack
	printlock   int8
	fastrand    uint32
	ncgocall    uint64 // number of cgo calls in total
	ncgo        int32  // number of cgo calls currently in progress
	// Not for gccgo: cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
	// Not for gccgo: cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
	park        note
	alllink     *m // on allm
	schedlink   muintptr
	mcache      *mcache
	lockedg     *g
	createstack [32]location // stack that created this thread.
	// Not for gccgo: freglo [16]uint32 // d[i] lsb and f[i]
	// Not for gccgo: freghi [16]uint32 // d[i] msb and f[i+16]
	// Not for gccgo: fflag uint32 // floating point compare flags
	locked        uint32  // tracking for lockosthread
	nextwaitm     uintptr // next m waiting for lock
	gcstats       gcstats
	needextram    bool
	traceback     uint8
	waitunlockf   unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	// Not for gccgo: thread uintptr // thread handle

	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	// Not for gccgo: libcall libcall
	// Not for gccgo: libcallpc uintptr // for cpu profiler
	// Not for gccgo: libcallsp uintptr
	// Not for gccgo: libcallg guintptr
	// Not for gccgo: syscall libcall // stores syscall parameters on windows

	mos mOS

	// Remaining fields are specific to gccgo.

	gsignalstack     unsafe.Pointer // stack for gsignal
	gsignalstacksize uintptr

	dropextram bool // drop after call is done

	gcing int32
}
// p holds the per-processor scheduling state; an M must acquire a P
// to run Go code.
type p struct {
	lock mutex

	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32   // incremented on every scheduler call
	syscalltick uint32   // incremented on every system call
	m           muintptr // back-link to associated m (nil if idle)
	mcache      *mcache
	// Not for gccgo: racectx uintptr

	// gccgo has only one size of defer.
	deferpool    []*_defer
	deferpoolbuf [32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	runnext guintptr

	// Available G's (status == Gdead)
	gfree    *g
	gfreecnt int32

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	tracebuf traceBufPtr

	// Not for gccgo for now: palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime     int64 // Nanoseconds in assistAlloc
	gcBgMarkWorker   guintptr
	gcMarkWorkerMode gcMarkWorkerMode

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	// Not for gccgo for now: gcw gcWork

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	pad [sys.CacheLineSize]byte
}
const (
	// The max value of GOMAXPROCS.
	// There are no fundamental restrictions on the value.
	_MaxGomaxprocs = 1 << 8
)
// schedt is the global scheduler state; there is one instance (sched).
type schedt struct {
	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
	goidgen  uint64
	lastpoll uint64

	lock mutex

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mcount       int32    // number of m's that have been created
	maxmcount    int32    // maximum number of m's allowed (or die)

	ngsys uint32 // number of system goroutines; updated atomically

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.

	// Global runnable queue.
	runqhead guintptr
	runqtail guintptr
	runqsize int32

	// Global cache of dead G's.
	gflock mutex
	gfree  *g
	ngfree int32

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs.
	deferlock mutex
	deferpool *_defer

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note

	// safepointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}
// The m.locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m.locked record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
const (
	_LockExternal = 1
	_LockInternal = 2
)
// Bit flags describing how the runtime handles each signal.
const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigHandling             // our signal handler is registered
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // add SA_ONSTACK to libc handler
	_SigUnblock              // unblocked in minit
)
// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}
// forcegcstate groups the state used to force periodic GCs.
// NOTE(review): field semantics (g = the force-GC goroutine, idle =
// whether it is parked) inferred from names — confirm against proc.go.
type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}
// startupRandomData holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte
663 // extendRandom extends the random numbers in r[:n] to the whole slice r.
664 // Treats n<0 as n==0.
665 func extendRandom(r []byte, n int) {
666 if n < 0 {
667 n = 0
669 for n < len(r) {
670 // Extend random bits using hash function & time seed
671 w := n
672 if w > 16 {
673 w = 16
675 h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
676 for i := 0; i < sys.PtrSize && n < len(r); i++ {
677 r[n] = byte(h)
679 h >>= 8
// deferred subroutine calls
// This is the gccgo version.
type _defer struct {
	// The next entry in the stack.
	link *_defer

	// The stack variable for the function which called this defer
	// statement. This is set to true if we are returning from
	// that function, false if we are panicing through it.
	frame *bool

	// The value of the panic stack when this function is
	// deferred. This function can not recover this value from
	// the panic stack. This can happen if a deferred function
	// has a defer statement itself.
	_panic *_panic

	// The function to call.
	pfn uintptr

	// The argument to pass to the function.
	arg unsafe.Pointer

	// The return address that a recover thunk matches against.
	// This is set by __go_set_defer_retaddr which is called by
	// the thunks created by defer statements.
	retaddr uintptr

	// Set to true if a function created by reflect.MakeFunc is
	// permitted to recover. The return address of such a
	// function will be somewhere in libffi, so __retaddr
	// is not useful.
	makefunccanrecover bool

	// Set to true if this defer stack entry is not part of the
	// defer pool.
	special bool
}
// panics
// This is the gccgo version.
type _panic struct {
	// The next entry in the stack.
	link *_panic

	// The value associated with this panic.
	arg interface{}

	// Whether this panic has been recovered.
	recovered bool

	// Whether this panic was pushed on the stack because of an
	// exception thrown in some other language.
	isforeign bool
}
// Flags controlling what a traceback includes.
const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)
// The maximum number of frames we print for a traceback
const _TracebackMaxFrames = 100
// Global scheduler and CPU-feature state.
var (
	// emptystring string

	allglen    uintptr
	allm       *m
	allp       [_MaxGomaxprocs + 1]*p
	gomaxprocs int32
	panicking  uint32
	ncpu       int32
	forcegc    forcegcstate
	sched      schedt
	newprocs   int32

	// Information about what cpu features are available.
	// Set on startup in asm_{x86,amd64}.s.
	cpuid_ecx   uint32
	support_aes bool

	// cpuid_edx uint32
	// cpuid_ebx7 uint32
	// lfenceBeforeRdtsc bool
	// support_avx bool
	// support_avx2 bool
	// support_bmi1 bool
	// support_bmi2 bool

	// goarm uint8 // set by cmd/link on arm systems
	// framepointer_enabled bool // set by cmd/link
)
// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)
// Types that are only used by gccgo.

// g_ucontext_t is a Go version of the C ucontext_t type, used by getcontext.
// _sizeof_ucontext_t is defined by mkrsysinfo.sh from <ucontext.h>.
// On some systems getcontext and friends require a value that is
// aligned to a 16-byte boundary. We implement this by increasing the
// required size and picking an appropriate offset when we use the
// array.
type g_ucontext_t [(_sizeof_ucontext_t + 15) / unsafe.Sizeof(unsafe.Pointer(nil))]unsafe.Pointer
// sigset is the Go version of the C type sigset_t.
// _sigset_t is defined by the Makefile from <signal.h>.
type sigset _sigset_t