1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
8 "runtime/internal/atomic"
12 // For gccgo, use go:linkname to export compiler-called functions.
14 //go:linkname deferproc
15 //go:linkname deferprocStack
16 //go:linkname deferreturn
17 //go:linkname setdeferretaddr
18 //go:linkname checkdefer
20 //go:linkname canrecover
21 //go:linkname makefuncfficanrecover
22 //go:linkname makefuncreturning
23 //go:linkname gorecover
24 //go:linkname deferredrecover
25 //go:linkname goPanicIndex
26 //go:linkname goPanicIndexU
27 //go:linkname goPanicSliceAlen
28 //go:linkname goPanicSliceAlenU
29 //go:linkname goPanicSliceAcap
30 //go:linkname goPanicSliceAcapU
31 //go:linkname goPanicSliceB
32 //go:linkname goPanicSliceBU
33 //go:linkname goPanicSlice3Alen
34 //go:linkname goPanicSlice3AlenU
35 //go:linkname goPanicSlice3Acap
36 //go:linkname goPanicSlice3AcapU
37 //go:linkname goPanicSlice3B
38 //go:linkname goPanicSlice3BU
39 //go:linkname goPanicSlice3C
40 //go:linkname goPanicSlice3CU
41 //go:linkname goPanicSliceConvert
42 //go:linkname panicshift
43 //go:linkname panicdivide
44 //go:linkname panicmem
45 // Temporary for C code to call:
48 // Check to make sure we can really generate a panic. If the panic
49 // was generated from the runtime, or from inside malloc, then convert
51 // pc should be the program counter of the compiler-generated code that
52 // triggered this panic.
53 func panicCheck1(pc
uintptr, msg
string) {
54 name
, _
, _
, _
:= funcfileline(pc
-1, -1, false)
55 if hasPrefix(name
, "runtime.") {
58 // TODO: is this redundant? How could we be in malloc
59 // but not in the runtime? runtime/internal/*, maybe?
61 if gp
!= nil && gp
.m
!= nil && gp
.m
.mallocing
!= 0 {
66 // Same as above, but calling from the runtime is allowed.
68 // Using this function is necessary for any panic that may be
69 // generated by runtime.sigpanic, since those are always called by the
71 func panicCheck2(err
string) {
72 // panic allocates, so to avoid recursive malloc, turn panics
73 // during malloc into throws.
75 if gp
!= nil && gp
.m
!= nil && gp
.m
.mallocing
!= 0 {
80 // Many of the following panic entry-points turn into throws when they
81 // happen in various runtime contexts. These should never happen in
82 // the runtime, and if they do, they indicate a serious issue and
83 // should not be caught by user code.
85 // The panic{Index,Slice,divide,shift} functions are called by
86 // code generated by the compiler for out of bounds index expressions,
87 // out of bounds slice expressions, division by zero, and shift by negative.
88 // The panicdivide (again), panicoverflow, panicfloat, and panicmem
89 // functions are called by the signal handler when a signal occurs
90 // indicating the respective problem.
92 // Since panic{Index,Slice,shift} are never called directly, and
93 // since the runtime package should never have an out of bounds slice
94 // or array reference or negative shift, if we see those functions called from the
95 // runtime package we turn the panic into a throw. That will dump the
96 // entire runtime stack for easier debugging.
98 // The entry points called by the signal handler will be called from
99 // runtime.sigpanic, so we can't disallow calls from the runtime to
100 // these (they always look like they're called from the runtime).
101 // Hence, for these, we just check for clearly bad runtime conditions.
103 // failures in the comparisons for s[x], 0 <= x < y (y == len(s))
104 func goPanicIndex(x
int, y
int) {
105 panicCheck1(getcallerpc(), "index out of range")
106 panic(boundsError
{x
: int64(x
), signed
: true, y
: y
, code
: boundsIndex
})
108 func goPanicIndexU(x
uint, y
int) {
109 panicCheck1(getcallerpc(), "index out of range")
110 panic(boundsError
{x
: int64(x
), signed
: false, y
: y
, code
: boundsIndex
})
113 // failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
114 func goPanicSliceAlen(x
int, y
int) {
115 panicCheck1(getcallerpc(), "slice bounds out of range")
116 panic(boundsError
{x
: int64(x
), signed
: true, y
: y
, code
: boundsSliceAlen
})
118 func goPanicSliceAlenU(x
uint, y
int) {
119 panicCheck1(getcallerpc(), "slice bounds out of range")
120 panic(boundsError
{x
: int64(x
), signed
: false, y
: y
, code
: boundsSliceAlen
})
122 func goPanicSliceAcap(x
int, y
int) {
123 panicCheck1(getcallerpc(), "slice bounds out of range")
124 panic(boundsError
{x
: int64(x
), signed
: true, y
: y
, code
: boundsSliceAcap
})
126 func goPanicSliceAcapU(x
uint, y
int) {
127 panicCheck1(getcallerpc(), "slice bounds out of range")
128 panic(boundsError
{x
: int64(x
), signed
: false, y
: y
, code
: boundsSliceAcap
})
131 // failures in the comparisons for s[x:y], 0 <= x <= y
132 func goPanicSliceB(x
int, y
int) {
133 panicCheck1(getcallerpc(), "slice bounds out of range")
134 panic(boundsError
{x
: int64(x
), signed
: true, y
: y
, code
: boundsSliceB
})
136 func goPanicSliceBU(x
uint, y
int) {
137 panicCheck1(getcallerpc(), "slice bounds out of range")
138 panic(boundsError
{x
: int64(x
), signed
: false, y
: y
, code
: boundsSliceB
})
141 // failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
142 func goPanicSlice3Alen(x
int, y
int) {
143 panicCheck1(getcallerpc(), "slice bounds out of range")
144 panic(boundsError
{x
: int64(x
), signed
: true, y
: y
, code
: boundsSlice3Alen
})
146 func goPanicSlice3AlenU(x
uint, y
int) {
147 panicCheck1(getcallerpc(), "slice bounds out of range")
148 panic(boundsError
{x
: int64(x
), signed
: false, y
: y
, code
: boundsSlice3Alen
})
150 func goPanicSlice3Acap(x
int, y
int) {
151 panicCheck1(getcallerpc(), "slice bounds out of range")
152 panic(boundsError
{x
: int64(x
), signed
: true, y
: y
, code
: boundsSlice3Acap
})
154 func goPanicSlice3AcapU(x
uint, y
int) {
155 panicCheck1(getcallerpc(), "slice bounds out of range")
156 panic(boundsError
{x
: int64(x
), signed
: false, y
: y
, code
: boundsSlice3Acap
})
159 // failures in the comparisons for s[:x:y], 0 <= x <= y
160 func goPanicSlice3B(x
int, y
int) {
161 panicCheck1(getcallerpc(), "slice bounds out of range")
162 panic(boundsError
{x
: int64(x
), signed
: true, y
: y
, code
: boundsSlice3B
})
164 func goPanicSlice3BU(x
uint, y
int) {
165 panicCheck1(getcallerpc(), "slice bounds out of range")
166 panic(boundsError
{x
: int64(x
), signed
: false, y
: y
, code
: boundsSlice3B
})
169 // failures in the comparisons for s[x:y:], 0 <= x <= y
170 func goPanicSlice3C(x
int, y
int) {
171 panicCheck1(getcallerpc(), "slice bounds out of range")
172 panic(boundsError
{x
: int64(x
), signed
: true, y
: y
, code
: boundsSlice3C
})
174 func goPanicSlice3CU(x
uint, y
int) {
175 panicCheck1(getcallerpc(), "slice bounds out of range")
176 panic(boundsError
{x
: int64(x
), signed
: false, y
: y
, code
: boundsSlice3C
})
179 // failures in the conversion (*[x]T)s, 0 <= x <= y, x == cap(s)
180 func goPanicSliceConvert(x
int, y
int) {
181 panicCheck1(getcallerpc(), "slice length too short to convert to pointer to array")
182 panic(boundsError
{x
: int64(x
), signed
: true, y
: y
, code
: boundsConvert
})
185 var shiftError
= error(errorString("negative shift amount"))
188 panicCheck1(getcallerpc(), "negative shift amount")
192 var divideError
= error(errorString("integer divide by zero"))
195 panicCheck2("integer divide by zero")
199 var overflowError
= error(errorString("integer overflow"))
201 func panicoverflow() {
202 panicCheck2("integer overflow")
206 var floatError
= error(errorString("floating point error"))
209 panicCheck2("floating point error")
213 var memoryError
= error(errorString("invalid memory address or nil pointer dereference"))
216 panicCheck2("invalid memory address or nil pointer dereference")
220 func panicmemAddr(addr
uintptr) {
221 panicCheck2("invalid memory address or nil pointer dereference")
222 panic(errorAddressString
{msg
: "invalid memory address or nil pointer dereference", addr
: addr
})
225 // deferproc creates a new deferred function.
226 // The compiler turns a defer statement into a call to this.
227 // frame points into the stack frame; it is used to determine which
228 // deferred functions are for the current stack frame, and whether we
229 // have already deferred functions for this frame.
230 // pfn is a C function pointer.
231 // arg is a value to pass to pfn.
232 func deferproc(frame
*bool, pfn
uintptr, arg unsafe
.Pointer
) {
236 throw("deferproc: d.panic != nil after newdefer")
241 d
.panicStack
= getg()._panic
245 d
.makefunccanrecover
= false
248 // deferprocStack queues a new deferred function with a defer record on the stack.
249 // The defer record, d, does not need to be initialized.
250 // Other arguments are the same as in deferproc.
252 func deferprocStack(d
*_defer
, frame
*bool, pfn
uintptr, arg unsafe
.Pointer
) {
255 // go code on the system stack can't defer
256 throw("defer on system stack")
260 d
.makefunccanrecover
= false
262 // The lines below implement:
266 // d.panicStack = gp._panic
267 // d.link = gp._defer
268 // But without write barriers. They are writes to the stack so they
269 // don't need a write barrier, and furthermore are to uninitialized
270 // memory, so they must not use a write barrier.
271 *(*uintptr)(unsafe
.Pointer(&d
.frame
)) = uintptr(unsafe
.Pointer(frame
))
272 *(*uintptr)(unsafe
.Pointer(&d
.arg
)) = uintptr(unsafe
.Pointer(arg
))
273 *(*uintptr)(unsafe
.Pointer(&d
._panic
)) = 0
274 *(*uintptr)(unsafe
.Pointer(&d
.panicStack
)) = uintptr(unsafe
.Pointer(gp
._panic
))
275 *(*uintptr)(unsafe
.Pointer(&d
.link
)) = uintptr(unsafe
.Pointer(gp
._defer
))
280 // Allocate a Defer, usually using per-P pool.
281 // Each defer must be released with freedefer.
282 func newdefer() *_defer
{
286 if len(pp
.deferpool
) == 0 && sched
.deferpool
!= nil {
287 lock(&sched
.deferlock
)
288 for len(pp
.deferpool
) < cap(pp
.deferpool
)/2 && sched
.deferpool
!= nil {
290 sched
.deferpool
= d
.link
292 pp
.deferpool
= append(pp
.deferpool
, d
)
294 unlock(&sched
.deferlock
)
296 if n
:= len(pp
.deferpool
); n
> 0 {
297 d
= pp
.deferpool
[n
-1]
298 pp
.deferpool
[n
-1] = nil
299 pp
.deferpool
= pp
.deferpool
[:n
-1]
305 // Allocate new defer.
312 // Free the given defer.
313 // The defer cannot be used after this call.
315 // This is nosplit because the incoming defer is in a perilous state.
316 // It's not on any defer list, so stack copying won't adjust stack
317 // pointers in it (namely, d.link). Hence, if we were to copy the
318 // stack, d could then contain a stale pointer.
321 func freedefer(d
*_defer
) {
323 // After this point we can copy the stack.
336 if len(pp
.deferpool
) == cap(pp
.deferpool
) {
337 // Transfer half of local cache to the central cache.
339 // Take this slow path on the system stack so
340 // we don't grow freedefer's stack.
342 var first
, last
*_defer
343 for len(pp
.deferpool
) > cap(pp
.deferpool
)/2 {
344 n
:= len(pp
.deferpool
)
345 d
:= pp
.deferpool
[n
-1]
346 pp
.deferpool
[n
-1] = nil
347 pp
.deferpool
= pp
.deferpool
[:n
-1]
355 lock(&sched
.deferlock
)
356 last
.link
= sched
.deferpool
357 sched
.deferpool
= first
358 unlock(&sched
.deferlock
)
364 pp
.deferpool
= append(pp
.deferpool
, d
)
370 // Separate function so that it can split stack.
371 // Windows otherwise runs out of stack space.
372 func freedeferpanic() {
373 // _panic must be cleared before d is unlinked from gp.
374 throw("freedefer with d._panic != nil")
378 // fn must be cleared before d is unlinked from gp.
379 throw("freedefer with d.fn != nil")
382 // deferreturn is called to undefer the stack.
383 // The compiler inserts a call to this function as a finally clause
384 // wrapped around the body of any function that calls defer.
385 // The frame argument points to the stack frame of the function.
386 func deferreturn(frame
*bool) {
388 for gp
._defer
!= nil && gp
._defer
.frame
== frame
{
394 // This is rather awkward.
395 // The gc compiler does this using assembler
397 var fn
func(unsafe
.Pointer
)
398 *(*uintptr)(unsafe
.Pointer(&fn
)) = uintptr(noescape(unsafe
.Pointer(&pfn
)))
404 // If that was CgocallBackDone, it will have freed the
405 // defer for us, since we are no longer running as Go code.
410 if gp
.ranCgocallBackDone
{
411 gp
.ranCgocallBackDone
= false
420 // Since we are executing a defer function now, we
421 // know that we are returning from the calling
422 // function. If the calling function, or one of its
423 // callees, panicked, then the defer functions would
424 // be executed by panic.
// __builtin_extract_return_addr is a GCC intrinsic that converts an
// address returned by __builtin_return_address(0) to a real address.
// On most architectures this is a nop.
//extern __builtin_extract_return_addr
func __builtin_extract_return_addr(uintptr) uintptr
435 // setdeferretaddr records the address to which the deferred function
436 // returns. This is check by canrecover. The frontend relies on this
437 // function returning false.
438 func setdeferretaddr(retaddr
uintptr) bool {
440 if gp
._defer
!= nil {
441 gp
._defer
.retaddr
= __builtin_extract_return_addr(retaddr
)
446 // checkdefer is called by exception handlers used when unwinding the
447 // stack after a recovered panic. The exception handler is simply
450 // If we have not yet reached the frame we are looking for, we
451 // continue unwinding.
452 func checkdefer(frame
*bool) {
455 // We should never wind up here. Even if some other
456 // language throws an exception, the cgo code
457 // should ensure that g is set.
458 throw("no g in checkdefer")
459 } else if gp
.isforeign
{
460 // Some other language has thrown an exception.
461 // We need to run the local defer handlers.
462 // If they call recover, we stop unwinding here.
466 gp
._panic
= (*_panic
)(noescape(unsafe
.Pointer(&p
)))
469 if d
== nil || d
.frame
!= frame || d
.pfn
== 0 {
476 var fn
func(unsafe
.Pointer
)
477 *(*uintptr)(unsafe
.Pointer(&fn
)) = uintptr(noescape(unsafe
.Pointer(&pfn
)))
485 // The recover function caught the panic
486 // thrown by some other language.
491 recovered
:= p
.recovered
495 // Just return and continue executing Go code.
500 // We are panicking through this function.
502 } else if gp
._defer
!= nil && gp
._defer
.pfn
== 0 && gp
._defer
.frame
== frame
{
503 // This is the defer function that called recover.
504 // Simply return to stop the stack unwind, and let the
505 // Go code continue to execute.
510 // We are returning from this function.
516 // This is some other defer function. It was already run by
517 // the call to panic, or just above. Rethrow the exception.
519 throw("rethrowException returned")
522 // unwindStack starts unwinding the stack for a panic. We unwind
523 // function calls until we reach the one which used a defer function
524 // which called recover. Each function which uses a defer statement
525 // will have an exception handler, as shown above for checkdefer.
527 // Allocate the exception type used by the unwind ABI.
528 // It would be nice to define it in runtime_sysinfo.go,
529 // but current definitions don't work because the required
530 // alignment is larger than can be represented in Go.
531 // The type never contains any Go pointers.
532 size
:= unwindExceptionSize()
533 usize
:= uintptr(unsafe
.Sizeof(uintptr(0)))
534 c
:= (size
+ usize
- 1) / usize
535 s
:= make([]uintptr, c
)
536 getg().exception
= unsafe
.Pointer(&s
[0])
540 // Goexit terminates the goroutine that calls it. No other goroutine is affected.
541 // Goexit runs all deferred calls before terminating the goroutine. Because Goexit
542 // is not a panic, any recover calls in those deferred functions will return nil.
544 // Calling Goexit from the main goroutine terminates that goroutine
545 // without func main returning. Since func main has not returned,
546 // the program continues execution of other goroutines.
547 // If all other goroutines exit, the program crashes.
549 // Run all deferred functions for the current goroutine.
550 // This code is similar to gopanic, see that implementation
551 // for detailed comments.
555 // Create a panic object for Goexit, so we can recognize when it might be
556 // bypassed by a recover().
560 gp
._panic
= (*_panic
)(noescape(unsafe
.Pointer(&p
)))
571 d
._panic
.aborted
= true
580 var fn
func(unsafe
.Pointer
)
581 *(*uintptr)(unsafe
.Pointer(&fn
)) = uintptr(noescape(unsafe
.Pointer(&pfn
)))
587 throw("bad defer entry in Goexit")
592 // Note: we ignore recovers here because Goexit isn't a panic
598 // Call all Error and String methods before freezing the world.
599 // Used when crashing with panicking.
600 func preprintpanics(p
*_panic
) {
602 if recover() != nil {
603 throw("panic while printing panic value")
607 switch v
:= p
.arg
.(type) {
617 // Print all currently active panics. Used when crashing.
618 // Should only be called after preprintpanics.
619 func printpanics(p
*_panic
) {
632 print(" [recovered]")
637 // The implementation of the predeclared function panic.
638 func gopanic(e any
) {
644 throw("panic on system stack")
647 if gp
.m
.mallocing
!= 0 {
651 throw("panic during malloc")
653 if gp
.m
.preemptoff
!= "" {
657 print("preempt off reason: ")
658 print(gp
.m
.preemptoff
)
660 throw("panic during preemptoff")
666 throw("panic holding locks")
669 // The gc compiler allocates this new _panic struct on the
670 // stack. We can't do that, because when a deferred function
671 // recovers the panic we unwind the stack. We unlink this
672 // entry before unwinding the stack, but that doesn't help in
673 // the case where we panic, a deferred function recovers and
674 // then panics itself, that panic is in turn recovered, and
675 // unwinds the stack past this stack frame.
683 atomic
.Xadd(&runningPanicDefers
, 1)
693 // If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
694 // take defer off list. The earlier panic or Goexit will not continue running.
697 d
._panic
.aborted
= true
706 // Record the panic that is running the defer.
707 // If there is a new panic during the deferred call, that panic
708 // will find d in the list and will mark d._panic (this panic) aborted.
711 var fn
func(unsafe
.Pointer
)
712 *(*uintptr)(unsafe
.Pointer(&fn
)) = uintptr(noescape(unsafe
.Pointer(&pfn
)))
718 throw("bad defer entry in panic")
724 if gp
._panic
!= nil && gp
._panic
.goexit
&& gp
._panic
.aborted
{
726 throw("Goexit returned")
728 atomic
.Xadd(&runningPanicDefers
, -1)
730 // Aborted panics are marked but remain on the g.panic list.
731 // Remove them from the list.
732 for gp
._panic
!= nil && gp
._panic
.aborted
{
733 gp
._panic
= gp
._panic
.link
735 if gp
._panic
== nil { // must be done with signal
739 if gp
._panic
!= nil && gp
._panic
.goexit
{
741 throw("Goexit returned")
744 // Unwind the stack by throwing an exception.
745 // The compiler has arranged to create
746 // exception handlers in each function
747 // that uses a defer statement. These
748 // exception handlers will check whether
749 // the entry on the top of the defer stack
750 // is from the current function. If it is,
751 // we have unwound the stack far enough.
754 throw("unwindStack returned")
757 // Because we executed that defer function by a panic,
758 // and it did not call recover, we know that we are
759 // not returning from the calling function--we are
760 // panicking through it.
763 // Deferred function did not panic. Remove d.
764 // In the p.recovered case, d will be removed by checkdefer.
770 // ran out of deferred calls - old-school panic now
771 // Because it is unsafe to call arbitrary user code after freezing
772 // the world, we call preprintpanics to invoke all necessary Error
773 // and String methods to prepare the panic strings before startpanic.
774 preprintpanics(gp
._panic
)
776 fatalpanic(gp
._panic
) // should not return
777 *(*int)(nil) = 0 // not reached
780 // currentDefer returns the top of the defer stack if it can be recovered.
781 // Otherwise it returns nil.
782 func currentDefer() *_defer
{
789 // The panic that would be recovered is the one on the top of
790 // the panic stack. We do not want to recover it if that panic
791 // was on the top of the panic stack when this function was
793 if d
.panicStack
== gp
._panic
{
797 // The deferred thunk will call setdeferretaddr. If this has
798 // not happened, then we have not been called via defer, and
799 // we can not recover.
807 // canrecover is called by a thunk to see if the real function would
808 // be permitted to recover a panic value. Recovering a value is
809 // permitted if the thunk was called directly by defer. retaddr is the
810 // return address of the function that is calling canrecover--that is,
812 func canrecover(retaddr
uintptr) bool {
818 ret
:= __builtin_extract_return_addr(retaddr
)
820 if ret
<= dret
&& ret
+16 >= dret
{
824 // On some systems, in some cases, the return address does not
825 // work reliably. See http://gcc.gnu.org/PR60406. If we are
826 // permitted to call recover, the call stack will look like this:
827 // runtime.gopanic, runtime.deferreturn, etc.
828 // thunk to call deferred function (calls __go_set_defer_retaddr)
829 // function that calls __go_can_recover (passing return address)
830 // runtime.canrecover
831 // Calling callers will skip the thunks. So if our caller's
832 // caller starts with "runtime.", then we are permitted to
834 var locs
[16]location
835 if callers(1, locs
[:2]) < 2 {
839 name
:= locs
[1].function
840 if hasPrefix(name
, "runtime.") {
844 // If the function calling recover was created by reflect.MakeFunc,
845 // then makefuncfficanrecover will have set makefunccanrecover.
846 if !d
.makefunccanrecover
{
850 // We look up the stack, ignoring libffi functions and
851 // functions in the reflect package, until we find
852 // reflect.makeFuncStub or reflect.ffi_callback called by FFI
853 // functions. Then we check the caller of that function.
855 n
:= callers(2, locs
[:])
856 foundFFICallback
:= false
859 name
= locs
[i
].function
861 // No function name means this caller isn't Go code.
862 // Assume that this is libffi.
866 // Ignore function in libffi.
867 if hasPrefix(name
, "ffi_") {
871 if foundFFICallback
{
875 if name
== "reflect.ffi_callback" {
876 foundFFICallback
= true
880 // Ignore other functions in the reflect package.
881 if hasPrefix(name
, "reflect.") ||
hasPrefix(name
, ".1reflect.") {
885 // We should now be looking at the real caller.
890 name
= locs
[i
].function
891 if hasPrefix(name
, "runtime.") {
899 // This function is called when code is about to enter a function
900 // created by the libffi version of reflect.MakeFunc. This function is
901 // passed the names of the callers of the libffi code that called the
902 // stub. It uses them to decide whether it is permitted to call
903 // recover, and sets d.makefunccanrecover so that gorecover can make
904 // the same decision.
905 func makefuncfficanrecover(loc
[]location
) {
911 // If we are already in a call stack of MakeFunc functions,
912 // there is nothing we can usefully check here.
913 if d
.makefunccanrecover
{
917 // loc starts with the caller of our caller. That will be a thunk.
918 // If its caller was a function function, then it was called
919 // directly by defer.
924 name
:= loc
[1].function
925 if hasPrefix(name
, "runtime.") {
926 d
.makefunccanrecover
= true
930 // makefuncreturning is called when code is about to exit a function
931 // created by reflect.MakeFunc. It is called by the function stub used
932 // by reflect.MakeFunc. It clears the makefunccanrecover field. It's
933 // OK to always clear this field, because canrecover will only be
934 // called by a stub created for a function that calls recover. That
935 // stub will not call a function created by reflect.MakeFunc, so by
936 // the time we get here any caller higher up on the call stack no
937 // longer needs the information.
938 func makefuncreturning() {
941 d
.makefunccanrecover
= false
945 // The implementation of the predeclared function recover.
946 func gorecover() interface{} {
949 if p
!= nil && !p
.goexit
&& !p
.recovered
{
956 // deferredrecover is called when a call to recover is deferred. That
957 // is, something like
960 // We need to handle this specially. In gc, the recover function
961 // looks up the stack frame. In particular, that means that a deferred
962 // recover will not recover a panic thrown in the same function that
963 // defers the recover. It will only recover a panic thrown in a
964 // function that defers the deferred call to recover.
969 // defer recover() // does not stop panic
975 // defer recover() // stops panic(0)
982 // defer recover() // does not stop panic
991 // defer recover() // stops panic(0)
998 // The interesting case here is f3. As can be seen from f2, the
999 // deferred recover could pick up panic(1). However, this does not
1000 // happen because it is blocked by the panic(0).
1002 // When a function calls recover, then when we invoke it we pass a
1003 // hidden parameter indicating whether it should recover something.
1004 // This parameter is set based on whether the function is being
1005 // invoked directly from defer. The parameter winds up determining
1006 // whether __go_recover or __go_deferred_recover is called at all.
1008 // In the case of a deferred recover, the hidden parameter that
1009 // controls the call is actually the one set up for the function that
1010 // runs the defer recover() statement. That is the right thing in all
1011 // the cases above except for f3. In f3 the function is permitted to
1012 // call recover, but the deferred recover call is not. We address that
1013 // here by checking for that specific case before calling recover. If
1014 // this function was deferred when there is already a panic on the
1015 // panic stack, then we can only recover that panic, not any other.
1017 // Note that we can get away with using a special function here
1018 // because you are not permitted to take the address of a predeclared
1019 // function like recover.
1020 func deferredrecover() interface{} {
1022 if gp
._defer
== nil || gp
._defer
.panicStack
!= gp
._panic
{
1028 //go:linkname sync_throw sync.throw
1029 func sync_throw(s
string) {
1034 func throw(s
string) {
1035 // Everything throw does should be recursively nosplit so it
1036 // can be called even when it's unsafe to grow the stack.
1037 systemstack(func() {
1038 print("fatal error: ", s
, "\n")
1041 if gp
.m
.throwing
== 0 {
1045 *(*int)(nil) = 0 // not reached
1048 // runningPanicDefers is non-zero while running deferred functions for panic.
1049 // runningPanicDefers is incremented and decremented atomically.
1050 // This is used to try hard to get a panic stack trace out when exiting.
1051 var runningPanicDefers
uint32
1053 // panicking is non-zero when crashing the program for an unrecovered panic.
1054 // panicking is incremented and decremented atomically.
1055 var panicking
uint32
1057 // paniclk is held while printing the panic information and stack trace,
1058 // so that two concurrent panics don't overlap their output.
1061 // fatalthrow implements an unrecoverable runtime throw. It freezes the
1062 // system, prints stack traces starting from its caller, and terminates the
1073 if dopanic_m(gp
, pc
, sp
) {
1079 *(*int)(nil) = 0 // not reached
1082 // fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
1083 // that if msgs != nil, fatalpanic also prints panic messages and decrements
1084 // runningPanicDefers once main is blocked from exiting.
1087 func fatalpanic(msgs
*_panic
) {
1093 if startpanic_m() && msgs
!= nil {
1094 // There were panic messages and startpanic_m
1095 // says it's okay to try to print them.
1097 // startpanic_m set panicking, which will
1098 // block main from exiting, so now OK to
1099 // decrement runningPanicDefers.
1100 atomic
.Xadd(&runningPanicDefers
, -1)
1105 docrash
= dopanic_m(gp
, pc
, sp
)
1108 // By crashing outside the above systemstack call, debuggers
1109 // will not be confused when generating a backtrace.
1110 // Function crash is marked nosplit to avoid stack growth.
1114 systemstack(func() {
1118 *(*int)(nil) = 0 // not reached
1121 // startpanic_m prepares for an unrecoverable panic.
1123 // It returns true if panic messages should be printed, or false if
1124 // the runtime is in bad shape and should just print stacks.
1126 // It must not have write barriers even though the write barrier
1127 // explicitly ignores writes once dying > 0. Write barriers still
1128 // assume that g.m.p != nil, and this function may not have P
1129 // in some contexts (e.g. a panic in a signal handler for a signal
1130 // sent to an M with no P).
1132 //go:nowritebarrierrec
1133 func startpanic_m() bool {
1135 if mheap_
.cachealloc
.size
== 0 { // very early
1136 print("runtime: panic before malloc heap initialized\n")
1138 // Disallow malloc during an unrecoverable panic. A panic
1139 // could happen in a signal handler, or in a throw, or inside
1140 // malloc itself. We want to catch if an allocation ever does
1141 // happen (even if we're not in one of these situations).
1144 // If we're dying because of a bad lock count, set it to a
1145 // good lock count so we don't recursively panic below.
1146 if _g_
.m
.locks
< 0 {
1150 switch _g_
.m
.dying
{
1152 // Setting dying >0 has the side-effect of disabling this G's writebuf.
1154 atomic
.Xadd(&panicking
, 1)
1156 if debug
.schedtrace
> 0 || debug
.scheddetail
> 0 {
1162 // Something failed while panicking.
1163 // Just print a stack trace and exit.
1165 print("panic during panic\n")
1168 // This is a genuine bug in the runtime, we couldn't even
1169 // print the stack trace successfully.
1171 print("stack trace unavailable\n")
1175 // Can't even print! Just exit.
1177 return false // Need to return something.
1184 func dopanic_m(gp
*g
, pc
, sp
uintptr) bool {
1186 signame
:= signame(gp
.sig
)
1188 print("[signal ", signame
)
1190 print("[signal ", hex(gp
.sig
))
1192 print(" code=", hex(gp
.sigcode0
), " addr=", hex(gp
.sigcode1
), " pc=", hex(gp
.sigpc
), "]\n")
1195 level
, all
, docrash
:= gotraceback()
1198 if gp
!= gp
.m
.curg
{
1205 } else if level
>= 2 || _g_
.m
.throwing
> 0 {
1206 print("\nruntime stack:\n")
1209 if !didothers
&& all
{
1216 if atomic
.Xadd(&panicking
, -1) != 0 {
1217 // Some other m is panicking too.
1218 // Let it print what it needs to print.
1219 // Wait forever without chewing up cpu.
1220 // It will exit when it's done.
1230 // canpanic returns false if a signal should throw instead of
1234 func canpanic(gp
*g
) bool {
1235 // Note that g is m->gsignal, different from gp.
1236 // Note also that g->m can change at preemption, so m can go stale
1237 // if this function ever makes a function call.
1241 // Is it okay for gp to panic instead of crashing the program?
1242 // Yes, as long as it is running Go code, not runtime code,
1243 // and not stuck in a system call.
1244 if gp
== nil || gp
!= mp
.curg
{
1247 if mp
.locks
!= 0 || mp
.mallocing
!= 0 || mp
.throwing
!= 0 || mp
.preemptoff
!= "" || mp
.dying
!= 0 {
1250 status
:= readgstatus(gp
)
1251 if status
&^_Gscan
!= _Grunning || gp
.syscallsp
!= 0 {
1257 // isAbortPC reports whether pc is the program counter at which
1258 // runtime.abort raises a signal.
1260 // It is nosplit because it's part of the isgoexception
1264 func isAbortPC(pc
uintptr) bool {