// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
10 "runtime/internal/atomic"
11 "runtime/internal/sys"
/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.

The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/
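// Illustrative sketch (not part of the original file): the logical decision
// made by the compiler-emitted prologue sequences above, written out in Go.
// needsMoreStack is a hypothetical helper name used only for illustration.
// Tiny frames get by with a single compare against the guard because the
// guard already leaves StackSmall bytes of slack; medium frames account for
// the part of the frame beyond StackSmall; huge frames always call morestack.
func needsMoreStack(sp, stackguard, frame uintptr) bool {
	switch {
	case frame <= _StackSmall:
		return sp <= stackguard
	case frame < _StackBig:
		return sp-(frame-_StackSmall) <= stackguard
	default:
		return true
	}
}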
const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024
	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem
	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
const (
	_StackPreempt = uintptrMask & -1314
	_StackFork    = uintptrMask & -1234
)
const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug = 0

	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1

	// check the BP links during traceback.
	debugCheckBP = false
)
const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)
// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex
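// Illustrative sketch (not part of the original file): the stack size served
// by a given pool order, following the order = log_2(size/FixedStack)
// relation above. The real code computes this inline as _FixedStack << order;
// orderSize is a hypothetical helper name.
func orderSize(order uint8) uintptr {
	return _FixedStack << order // order 0: _FixedStack, order 1: 2*_FixedStack, ...
}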
// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}
func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}
// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.stackfreelist.ptr() != nil {
			throw("bad stackfreelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.stackfreelist
			s.stackfreelist = x
		}
		list.insert(s)
	}
	x := s.stackfreelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.stackfreelist = x.ptr().next
	s.allocCount++
	if s.stackfreelist.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}
// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.stackfreelist.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.stackfreelist
	s.stackfreelist = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.stackfreelist = 0
		mheap_.freeStack(s)
	}
}
// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}
// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)
	var stkbarSlice slice

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		if maxstkbar != 0 {
			stkbarSlice = slice{add(v, top), 0, maxstkbar}
		}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocStack(npage)
			if s == nil {
				throw("out of memory")
			}
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	if maxstkbar != 0 {
		stkbarSlice = slice{add(v, top), 0, maxstkbar}
	}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}
// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanStack {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeStack(s)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}
var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}
// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}
// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}
func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}
// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
		retry:
			p := *pp
			if f != nil && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of stack of a goroutine that does an systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}
func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}
func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}
func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}
func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}
func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
	}
}
func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}
func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
		p = uintptr(unsafe.Pointer(sg.selectdone)) + unsafe.Sizeof(sg.selectdone)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}
// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point in to the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Disallow sigprof scans of this stack and block if there's
	// one in progress.
	gcLockStackBarriers(gp)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	gcUnlockStackBarriers(gp)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}
// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// ctxt is the value of the context register on morestack. newstack
// will write it to g.sched.ctxt.
func newstack(ctxt unsafe.Pointer) {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	// Write ctxt to gp.sched. We do this here instead of in
	// morestack so it has the necessary write barrier.
	gp.sched.ctxt = ctxt

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// do nothing.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize), true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	if gp.startpc == gcBgMarkWorkerPC {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}
// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.stackfreelist = 0
				mheap_.freeStack(s)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeStack(s)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}
//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}