1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
8 "runtime/internal/atomic"
17 // Beyond indicating the general state of a G, the G status
18 // acts like a lock on the goroutine's stack (and hence its
19 // ability to execute user code).
21 // If you add to this list, add to the list
22 // of "okay during garbage collection" status
25 // _Gidle means this goroutine was just allocated and has not
26 // yet been initialized.
29 // _Grunnable means this goroutine is on a run queue. It is
30 // not currently executing user code. The stack is not owned.
33 // _Grunning means this goroutine may execute user code. The
34 // stack is owned by this goroutine. It is not on a run queue.
35 // It is assigned an M and a P.
38 // _Gsyscall means this goroutine is executing a system call.
39 // It is not executing user code. The stack is owned by this
40 // goroutine. It is not on a run queue. It is assigned an M.
43 // _Gwaiting means this goroutine is blocked in the runtime.
44 // It is not executing user code. It is not on a run queue,
45 // but should be recorded somewhere (e.g., a channel wait
46 // queue) so it can be ready()d when necessary. The stack is
47 // not owned *except* that a channel operation may read or
48 // write parts of the stack under the appropriate channel
49 // lock. Otherwise, it is not safe to access the stack after a
50 // goroutine enters _Gwaiting (e.g., it may get moved).
53 // _Gmoribund_unused is currently unused, but hardcoded in gdb
55 _Gmoribund_unused
// 5
57 // _Gdead means this goroutine is currently unused. It may be
58 // just exited, on a free list, or just being initialized. It
59 // is not executing user code. It may or may not have a stack
60 // allocated. The G and its stack (if any) are owned by the M
61 // that is exiting the G or that obtained the G from the free
65 // _Genqueue_unused is currently unused.
68 // _Gcopystack means this goroutine's stack is being moved. It
69 // is not executing user code and is not on a run queue. The
70 // stack is owned by the goroutine that put it in _Gcopystack.
73 // _Gscan combined with one of the above states other than
74 // _Grunning indicates that GC is scanning the stack. The
75 // goroutine is not executing user code and the stack is owned
76 // by the goroutine that set the _Gscan bit.
78 // _Gscanrunning is different: it is used to briefly block
79 // state transitions while GC signals the G to scan its own
80 // stack. This is otherwise like _Grunning.
82 // atomicstatus&~Gscan gives the state the goroutine will
83 // return to when the scan completes.
85 _Gscanrunnable
= _Gscan
+ _Grunnable
// 0x1001
86 _Gscanrunning
= _Gscan
+ _Grunning
// 0x1002
87 _Gscansyscall
= _Gscan
+ _Gsyscall
// 0x1003
88 _Gscanwaiting
= _Gscan
+ _Gwaiting
// 0x1004
94 _Prunning
// Only this P is allowed to change from _Prunning.
100 // Mutual exclusion locks. In the uncontended case,
101 // as fast as spin locks (just a few user-level instructions),
102 // but on the contention path they sleep in the kernel.
103 // A zeroed Mutex is unlocked (no need to initialize each lock).
105 // Futex-based impl treats it as uint32 key,
106 // while sema-based impl as M* waitm.
107 // Used to be a union, but unions break precise GC.
111 // sleep and wakeup on one-time events.
112 // before any calls to notesleep or notewakeup,
113 // must call noteclear to initialize the Note.
114 // then, exactly one thread can call notesleep
115 // and exactly one thread can call notewakeup (once).
116 // once notewakeup has been called, the notesleep
117 // will return. future notesleep will return immediately.
118 // subsequent noteclear must be called only after
119 // previous notesleep has returned, e.g. it's disallowed
120 // to call noteclear straight after notewakeup.
122 // notetsleep is like notesleep but wakes up after
123 // a given number of nanoseconds even if the event
124 // has not yet happened. if a goroutine uses notetsleep to
125 // wake up early, it must wait to call noteclear until it
126 // can be sure that no other goroutine is calling
129 // notesleep/notetsleep are generally called on g0,
130 // notetsleepg is similar to notetsleep but is called on user g.
132 // Futex-based impl treats it as uint32 key,
133 // while sema-based impl as M* waitm.
134 // Used to be a union, but unions break precise GC.
138 type funcval
struct {
140 // variable-size, fn-specific data here
143 // The representation of a non-empty interface.
144 // See comment in iface.go for more details on this struct.
150 // The representation of an empty interface.
151 // See comment in iface.go for more details on this struct.
157 func efaceOf(ep
*interface{}) *eface
{
158 return (*eface
)(unsafe
.Pointer(ep
))
161 // The guintptr, muintptr, and puintptr are all used to bypass write barriers.
162 // It is particularly important to avoid write barriers when the current P has
163 // been released, because the GC thinks the world is stopped, and an
164 // unexpected write barrier would not be synchronized with the GC,
165 // which can lead to a half-executed write barrier that has marked the object
166 // but not queued it. If the GC skips the object and completes before the
167 // queuing can occur, it will incorrectly free the object.
169 // We tried using special assignment functions invoked only when not
170 // holding a running P, but then some updates to a particular memory
171 // word went through write barriers and some did not. This breaks the
172 // write barrier shadow checking mode, and it is also scary: better to have
173 // a word that is completely ignored by the GC than to have one for which
174 // only a few updates are ignored.
176 // Gs and Ps are always reachable via true pointers in the
177 // allgs and allp lists or (during allocation before they reach those lists)
178 // from stack variables.
180 // Ms are always reachable via true pointers either from allm or
181 // freem. Unlike Gs and Ps we do free Ms, so it's important that
182 // nothing ever hold an muintptr across a safe point.
184 // A guintptr holds a goroutine pointer, but typed as a uintptr
185 // to bypass write barriers. It is used in the Gobuf goroutine state
186 // and in scheduling lists that are manipulated without a P.
188 // The Gobuf.g goroutine pointer is almost always updated by assembly code.
189 // In one of the few places it is updated by Go code - func save - it must be
190 // treated as a uintptr to avoid a write barrier being emitted at a bad time.
191 // Instead of figuring out how to emit the write barriers missing in the
192 // assembly manipulation, we change the type of the field to uintptr,
193 // so that it does not require write barriers at all.
195 // Goroutine structs are published in the allg list and never freed.
196 // That will keep the goroutine structs from being collected.
197 // There is never a time that Gobuf.g's contain the only references
198 // to a goroutine: the publishing of the goroutine in allg comes first.
199 // Goroutine pointers are also kept in non-GC-visible places like TLS,
200 // so I can't see them ever moving. If we did want to start moving data
201 // in the GC, we'd need to allocate the goroutine structs from an
202 // alternate arena. Using guintptr doesn't make that problem any worse.
// A guintptr holds a *g as a bare uintptr so that stores to it never
// emit write barriers (see the rationale in the comments above).
type guintptr uintptr
206 func (gp guintptr
) ptr() *g
{ return (*g
)(unsafe
.Pointer(gp
)) }
209 func (gp
*guintptr
) set(g
*g
) { *gp
= guintptr(unsafe
.Pointer(g
)) }
212 func (gp
*guintptr
) cas(old
, new guintptr
) bool {
213 return atomic
.Casuintptr((*uintptr)(unsafe
.Pointer(gp
)), uintptr(old
), uintptr(new))
216 // setGNoWB performs *gp = new without a write barrier.
217 // For times when it's impractical to use a guintptr.
220 func setGNoWB(gp
**g
, new *g
) {
221 (*guintptr
)(unsafe
.Pointer(gp
)).set(new)
// A puintptr holds a *p as a bare uintptr so that stores to it never
// emit write barriers.
type puintptr uintptr
227 func (pp puintptr
) ptr() *p
{ return (*p
)(unsafe
.Pointer(pp
)) }
230 func (pp
*puintptr
) set(p
*p
) { *pp
= puintptr(unsafe
.Pointer(p
)) }
232 // muintptr is a *m that is not tracked by the garbage collector.
234 // Because we do free Ms, there are some additional constraints on
237 // 1. Never hold an muintptr locally across a safe point.
239 // 2. Any muintptr in the heap must be owned by the M itself so it can
240 // ensure it is not in use when the last true *m is released.
// A muintptr is a *m that is not tracked by the garbage collector.
// See the constraints documented above: never hold one across a safe
// point, and heap copies must be owned by the M itself.
type muintptr uintptr
244 func (mp muintptr
) ptr() *m
{ return (*m
)(unsafe
.Pointer(mp
)) }
247 func (mp
*muintptr
) set(m
*m
) { *mp
= muintptr(unsafe
.Pointer(m
)) }
249 // setMNoWB performs *mp = new without a write barrier.
250 // For times when it's impractical to use an muintptr.
253 func setMNoWB(mp
**m
, new *m
) {
254 (*muintptr
)(unsafe
.Pointer(mp
)).set(new)
257 // sudog represents a g in a wait list, such as for sending/receiving
260 // sudog is necessary because the g ↔ synchronization object relation
261 // is many-to-many. A g can be on many wait lists, so there may be
262 // many sudogs for one g; and many gs may be waiting on the same
263 // synchronization object, so there may be many sudogs for one object.
265 // sudogs are allocated from a special pool. Use acquireSudog and
266 // releaseSudog to allocate and free them.
268 // The following fields are protected by the hchan.lock of the
269 // channel this sudog is blocking on. shrinkstack depends on
270 // this for sudogs involved in channel ops.
274 // isSelect indicates g is participating in a select, so
275 // g.selectDone must be CAS'd to win the wake-up race.
279 elem unsafe
.Pointer
// data element (may point to stack)
281 // The following fields are never accessed concurrently.
282 // For channels, waitlink is only accessed by g.
283 // For semaphores, all fields (including the ones above)
284 // are only accessed when holding a semaRoot lock.
289 parent
*sudog
// semaRoot binary tree
290 waitlink
*sudog
// g.waiting list or semaRoot
291 waittail
*sudog
// semaRoot
298 type libcall struct {
300 n uintptr // number of parameters
301 args uintptr // parameters
302 r1 uintptr // return values
304 err uintptr // error number
312 // describes how to handle callback
313 type wincallbackcontext struct {
314 gobody unsafe.Pointer // go function to call
315 argsize uintptr // callback arguments size (in bytes)
316 restorestack uintptr // adjust stack on return by (in bytes) (386 only)
324 // Stack describes a Go execution stack.
325 // The bounds of the stack are exactly [lo, hi),
326 // with no implicit data structures on either side.
335 // stack describes the actual stack memory: [stack.lo, stack.hi).
336 // stackguard0 is the stack pointer compared in the Go stack growth prologue.
337 // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
338 // stackguard1 is the stack pointer compared in the C stack growth prologue.
339 // It is stack.lo+StackGuard on g0 and gsignal stacks.
340 // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
341 // Not for gccgo: stack stack // offset known to runtime/cgo
342 // Not for gccgo: stackguard0 uintptr // offset known to liblink
343 // Not for gccgo: stackguard1 uintptr // offset known to liblink
345 _panic
*_panic
// innermost panic - offset known to liblink
346 _defer
*_defer
// innermost defer
347 m
*m
// current m; offset known to arm liblink
348 // Not for gccgo: sched gobuf
349 syscallsp
uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
350 syscallpc
uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
351 // Not for gccgo: stktopsp uintptr // expected sp at top of stack, to check in traceback
352 param unsafe
.Pointer
// passed parameter on wakeup
354 // Not for gccgo: stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
356 waitsince
int64 // approx time when the g become blocked
357 waitreason
string // if status==Gwaiting
359 preempt
bool // preemption signal, duplicates stackguard0 = stackpreempt
360 paniconfault
bool // panic (instead of crash) on unexpected fault address
361 preemptscan
bool // preempted g does scan for gc
362 gcscandone
bool // g has scanned stack; protected by _Gscan bit in status
363 gcscanvalid
bool // false at start of gc cycle, true if G has not run since last scan; TODO: remove?
364 throwsplit
bool // must not split stack
365 raceignore
int8 // ignore race detection events
366 sysblocktraced
bool // StartTrace has emitted EvGoInSyscall about this goroutine
367 sysexitticks
int64 // cputicks when syscall has returned (for tracing)
368 traceseq
uint64 // trace event sequencer
369 tracelastp puintptr
// last P emitted an event for this goroutine
376 gopc
uintptr // pc of go statement that created this goroutine
377 startpc
uintptr // pc of goroutine function
378 // Not for gccgo: racectx uintptr
379 waiting
*sudog
// sudog structures this g is waiting on (that have a valid elem ptr); in lock order
380 // Not for gccgo: cgoCtxt []uintptr // cgo traceback context
381 labels unsafe
.Pointer
// profiler labels
382 timer
*timer
// cached timer for time.Sleep
383 selectDone
uint32 // are we participating in a select and did someone win the race?
387 // gcAssistBytes is this G's GC assist credit in terms of
388 // bytes allocated. If this is positive, then the G has credit
389 // to allocate gcAssistBytes bytes without assisting. If this
390 // is negative, then the G must correct this by performing
391 // scan work. We track this in bytes to make it fast to update
392 // and check for debt in the malloc hot path. The assist ratio
393 // determines how this corresponds to scan work debt.
396 // Remaining fields are specific to gccgo.
398 exception unsafe
.Pointer
// current exception being thrown
399 isforeign
bool // whether current exception is not from Go
401 // When using split-stacks, these fields holds the results of
402 // __splitstack_find while executing a syscall. These are used
403 // by the garbage collector to scan the goroutine's stack.
405 // When not using split-stacks, g0 stacks are allocated by the
406 // libc and other goroutine stacks are allocated by malg.
407 // gcstack: unused (sometimes cleared)
408 // gcstacksize: g0: 0; others: size of stack
409 // gcnextsegment: unused
410 // gcnextsp: current SP while executing a syscall
411 // gcinitialsp: g0: top of stack; others: start of stack memory
412 // gcnextsp2: current secondary stack pointer (if present)
413 // gcinitialsp2: start of secondary stack (if present)
416 gcnextsegment
uintptr
418 gcinitialsp unsafe
.Pointer
420 gcinitialsp2 unsafe
.Pointer
422 // gcregs holds the register values while executing a syscall.
423 // This is set by getcontext and scanned by the garbage collector.
426 entry
func(unsafe
.Pointer
) // goroutine function to run
427 entryfn
uintptr // function address passed to __go_go
428 fromgogo
bool // whether entered from gogo function
430 scanningself
bool // whether goroutine is scanning its own stack
432 isSystemGoroutine
bool // whether goroutine is a "system" goroutine
434 traceback
*tracebackg
// stack traceback buffer
436 context g_ucontext_t
// saved context for setcontext
437 stackcontext
[10]uintptr // split-stack context
441 g0
*g
// goroutine with scheduling stack
442 // Not for gccgo: morebuf gobuf // gobuf arg to morestack
443 // Not for gccgo: divmod uint32 // div/mod denominator for arm - known to liblink
445 // Fields not known to debuggers.
446 procid
uint64 // for debuggers, but offset not hard-coded
447 gsignal
*g
// signal-handling g
448 // Not for gccgo: goSigStack gsignalStack // Go-allocated signal handling stack
449 sigmask sigset
// storage for saved signal mask
450 // Not for gccgo: tls [6]uintptr // thread-local storage (for x86 extern register)
452 curg
*g
// current running goroutine
453 caughtsig guintptr
// goroutine running during fatal signal
454 p puintptr
// attached p for executing go code (nil if not executing go code)
459 preemptoff
string // if != "", keep curg running on this m
465 spinning
bool // m is out of work and is actively looking for work
466 blocked
bool // m is blocked on a note
467 inwb
bool // m is executing a write barrier
468 newSigstack
bool // minit on C thread called sigaltstack
470 incgo
bool // m is executing a cgo call
471 freeWait
uint32 // if == 0, safe to free g0 and delete m (atomic)
475 ncgocall
uint64 // number of cgo calls in total
476 ncgo
int32 // number of cgo calls currently in progress
477 // Not for gccgo: cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
478 // Not for gccgo: cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
480 alllink
*m
// on allm
484 createstack
[32]location
// stack that created this thread.
485 // Not for gccgo: freglo [16]uint32 // d[i] lsb and f[i]
486 // Not for gccgo: freghi [16]uint32 // d[i] msb and f[i+16]
487 // Not for gccgo: fflag uint32 // floating point compare flags
488 lockedExt
uint32 // tracking for external LockOSThread
489 lockedInt
uint32 // tracking for internal lockOSThread
490 nextwaitm muintptr
// next m waiting for lock
491 waitunlockf unsafe
.Pointer
// todo go func(*g, unsafe.pointer) bool
492 waitlock unsafe
.Pointer
497 // Not for gccgo: thread uintptr // thread handle
498 freelink
*m
// on sched.freem
500 // these are here because they are too large to be on the stack
501 // of low-level NOSPLIT functions.
502 // Not for gccgo: libcall libcall
503 // Not for gccgo: libcallpc uintptr // for cpu profiler
504 // Not for gccgo: libcallsp uintptr
505 // Not for gccgo: libcallg guintptr
506 // Not for gccgo: syscall libcall // stores syscall parameters on windows
510 // Remaining fields are specific to gccgo.
512 gsignalstack unsafe
.Pointer
// stack for gsignal
513 gsignalstacksize
uintptr
515 dropextram
bool // drop after call is done
516 exiting
bool // thread is exiting
525 status
uint32 // one of pidle/prunning/...
527 schedtick
uint32 // incremented on every scheduler call
528 syscalltick
uint32 // incremented on every system call
529 sysmontick sysmontick
// last tick observed by sysmon
530 m muintptr
// back-link to associated m (nil if idle)
534 // gccgo has only one size of defer.
536 deferpoolbuf
[32]*_defer
538 // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
542 // Queue of runnable goroutines. Accessed without lock.
546 // runnext, if non-nil, is a runnable G that was ready'd by
547 // the current G and should be run next instead of what's in
548 // runq if there's time remaining in the running G's time
549 // slice. It will inherit the time left in the current time
550 // slice. If a set of goroutines is locked in a
551 // communicate-and-wait pattern, this schedules that set as a
552 // unit and eliminates the (potentially large) scheduling
553 // latency that otherwise arises from adding the ready'd
554 // goroutines to the end of the run queue.
557 // Available G's (status == Gdead)
566 // traceSweep indicates the sweep events should be traced.
567 // This is used to defer the sweep start event until a span
568 // has actually been swept.
570 // traceSwept and traceReclaimed track the number of bytes
571 // swept and reclaimed by sweeping in the current sweep loop.
572 traceSwept
, traceReclaimed
uintptr
574 palloc persistentAlloc
// per-P to avoid mutex
577 gcAssistTime
int64 // Nanoseconds in assistAlloc
578 gcFractionalMarkTime
int64 // Nanoseconds in fractional mark worker
579 gcBgMarkWorker guintptr
580 gcMarkWorkerMode gcMarkWorkerMode
582 // gcMarkWorkerStartTime is the nanotime() at which this mark
584 gcMarkWorkerStartTime
int64
586 // gcw is this P's GC work buffer cache. The work buffer is
587 // filled by write barriers, drained by mutator assists, and
588 // disposed on certain GC state transitions.
591 // wbBuf is this P's GC write barrier buffer.
593 // TODO: Consider caching this in the running G.
596 runSafePointFn
uint32 // if 1, run sched.safePointFn at next safe point
598 pad
[sys
.CacheLineSize
]byte
602 // accessed atomically. keep at top to ensure alignment on 32-bit systems.
608 // When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
609 // sure to call checkdead().
611 midle muintptr
// idle m's waiting for work
612 nmidle
int32 // number of idle m's waiting for work
613 nmidlelocked
int32 // number of locked m's waiting for work
614 mnext
int64 // number of m's that have been created and next M ID
615 maxmcount
int32 // maximum number of m's allowed (or die)
616 nmsys
int32 // number of system m's not counted for deadlock
617 nmfreed
int64 // cumulative number of freed m's
619 ngsys
uint32 // number of system goroutines; updated atomically
621 pidle puintptr
// idle p's
623 nmspinning
uint32 // See "Worker thread parking/unparking" comment in proc.go.
625 // Global runnable queue.
630 // Global cache of dead G's.
635 // Central cache of sudog structs.
639 // Central pool of available defer structs.
643 // freem is the list of m's waiting to be freed when their
644 // m.exited is set. Linked through m.freelink.
647 gcwaiting
uint32 // gc is waiting to run
653 // safepointFn should be called on each P at the next GC
654 // safepoint if p.runSafePointFn is set.
659 profilehz
int32 // cpu profiling rate
661 procresizetime
int64 // nanotime() of last change to gomaxprocs
662 totaltime
int64 // ∫gomaxprocs dt up to procresizetime
665 // Values for the flags field of a sigTabT.
667 _SigNotify
= 1 << iota // let signal.Notify have signal, even if from kernel
668 _SigKill
// if signal.Notify doesn't take it, exit quietly
669 _SigThrow
// if signal.Notify doesn't take it, exit loudly
670 _SigPanic
// if the signal is from the kernel, panic
671 _SigDefault
// if the signal isn't explicitly requested, don't monitor it
672 _SigGoExit
// cause all runtime procs to exit (only used on Plan 9).
673 _SigSetStack
// add SA_ONSTACK to libc handler
674 _SigUnblock
// always unblock; see blockableSig
675 _SigIgn
// _SIG_DFL action is to ignore the signal
678 // Lock-free stack node.
679 // // Also known to export_test.go.
685 type forcegcstate
struct {
691 // startup_random_data holds random bytes initialized at startup. These come from
692 // the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
// startupRandomData holds random bytes initialized at startup (from the
// ELF AT_RANDOM auxiliary vector on the platforms noted above); nil until
// that initialization runs.
var startupRandomData []byte
695 // extendRandom extends the random numbers in r[:n] to the whole slice r.
696 // Treats n<0 as n==0.
697 func extendRandom(r
[]byte, n
int) {
702 // Extend random bits using hash function & time seed
707 h
:= memhash(unsafe
.Pointer(&r
[n
-w
]), uintptr(nanotime()), uintptr(w
))
708 for i
:= 0; i
< sys
.PtrSize
&& n
< len(r
); i
++ {
716 // A _defer holds an entry on the list of deferred calls.
717 // If you add a field here, add code to clear it in freedefer.
719 // The next entry in the stack.
722 // The stack variable for the function which called this defer
723 // statement. This is set to true if we are returning from
724 // that function, false if we are panicking through it.
727 // The value of the panic stack when this function is
728 // deferred. This function can not recover this value from
729 // the panic stack. This can happen if a deferred function
730 // has a defer statement itself.
733 // The panic that caused the defer to run. This is used to
734 // discard panics that have already been handled.
737 // The function to call.
740 // The argument to pass to the function.
743 // The return address that a recover thunk matches against.
744 // This is set by __go_set_defer_retaddr which is called by
745 // the thunks created by defer statements.
748 // Set to true if a function created by reflect.MakeFunc is
749 // permitted to recover. The return address of such a
750 // function will be somewhere in libffi, so __retaddr
752 makefunccanrecover
bool
756 // This is the gccgo version.
758 // The next entry in the stack.
761 // The value associated with this panic.
764 // Whether this panic has been recovered.
767 // Whether this panic was pushed on the stack because of an
768 // exception thrown in some other language.
771 // Whether this panic was already seen by a deferred function
772 // which called panic again.
777 _TraceRuntimeFrames
= 1 << iota // include frames for internal runtime functions.
778 _TraceTrap
// the initial PC, SP are from a trap, not a return PC from a call
779 _TraceJumpStack
// if traceback is on a systemstack, resume trace at g that called into it
782 // The maximum number of frames we print for a traceback
// _TracebackMaxFrames caps the number of frames printed for a single
// traceback.
const _TracebackMaxFrames = 100
788 allp
[]*p
// len(allp) == gomaxprocs; may change at safe points, otherwise immutable
789 allpLock mutex
// Protects P-less reads of allp and all writes
796 // Information about what cpu features are available.
797 // Set on startup in asm_{x86,amd64}.s.
798 // Packages outside the runtime should not use these
799 // as they are not an external api.
805 // lfenceBeforeRdtsc bool
811 // goarm uint8 // set by cmd/link on arm systems
812 // framepointer_enabled bool // set by cmd/link
815 // Set by the linker so the runtime can determine the buildmode.
817 islibrary
bool // -buildmode=c-shared
818 isarchive
bool // -buildmode=c-archive
821 // Types that are only used by gccgo.
823 // g_ucontext_t is a Go version of the C ucontext_t type, used by getcontext.
824 // _sizeof_ucontext_t is defined by mkrsysinfo.sh from <ucontext.h>.
825 // On some systems getcontext and friends require a value that is
826 // aligned to a 16-byte boundary. We implement this by increasing the
827 // required size and picking an appropriate offset when we use the
829 type g_ucontext_t
[(_sizeof_ucontext_t
+ 15) / unsafe
.Sizeof(uintptr(0))]uintptr
831 // sigset is the Go version of the C type sigset_t.
832 // _sigset_t is defined by the Makefile from <signal.h>.
833 type sigset _sigset_t
835 // getMemstats returns a pointer to the internal memstats variable,
837 //go:linkname getMemstats runtime.getMemstats
838 func getMemstats() *mstats
{