c++: Allow IS_FAKE_BASE_TYPE for union types [PR114954]
[official-gcc.git] / libgo / go / runtime / panic.go
blob49c5f5e615feafddbbee8e0ae2da7bf56fd7160f
1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime
7 import (
8 "runtime/internal/atomic"
9 "unsafe"
12 // For gccgo, use go:linkname to export compiler-called functions.
14 //go:linkname deferproc
15 //go:linkname deferprocStack
16 //go:linkname deferreturn
17 //go:linkname setdeferretaddr
18 //go:linkname checkdefer
19 //go:linkname gopanic
20 //go:linkname canrecover
21 //go:linkname makefuncfficanrecover
22 //go:linkname makefuncreturning
23 //go:linkname gorecover
24 //go:linkname deferredrecover
25 //go:linkname goPanicIndex
26 //go:linkname goPanicIndexU
27 //go:linkname goPanicSliceAlen
28 //go:linkname goPanicSliceAlenU
29 //go:linkname goPanicSliceAcap
30 //go:linkname goPanicSliceAcapU
31 //go:linkname goPanicSliceB
32 //go:linkname goPanicSliceBU
33 //go:linkname goPanicSlice3Alen
34 //go:linkname goPanicSlice3AlenU
35 //go:linkname goPanicSlice3Acap
36 //go:linkname goPanicSlice3AcapU
37 //go:linkname goPanicSlice3B
38 //go:linkname goPanicSlice3BU
39 //go:linkname goPanicSlice3C
40 //go:linkname goPanicSlice3CU
41 //go:linkname goPanicSliceConvert
42 //go:linkname panicshift
43 //go:linkname panicdivide
44 //go:linkname panicmem
45 // Temporary for C code to call:
46 //go:linkname throw
48 // Check to make sure we can really generate a panic. If the panic
49 // was generated from the runtime, or from inside malloc, then convert
50 // to a throw of msg.
51 // pc should be the program counter of the compiler-generated code that
52 // triggered this panic.
53 func panicCheck1(pc uintptr, msg string) {
54 name, _, _, _ := funcfileline(pc-1, -1, false)
55 if hasPrefix(name, "runtime.") {
56 throw(msg)
58 // TODO: is this redundant? How could we be in malloc
59 // but not in the runtime? runtime/internal/*, maybe?
60 gp := getg()
61 if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
62 throw(msg)
66 // Same as above, but calling from the runtime is allowed.
68 // Using this function is necessary for any panic that may be
69 // generated by runtime.sigpanic, since those are always called by the
70 // runtime.
71 func panicCheck2(err string) {
72 // panic allocates, so to avoid recursive malloc, turn panics
73 // during malloc into throws.
74 gp := getg()
75 if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
76 throw(err)
80 // Many of the following panic entry-points turn into throws when they
81 // happen in various runtime contexts. These should never happen in
82 // the runtime, and if they do, they indicate a serious issue and
83 // should not be caught by user code.
85 // The panic{Index,Slice,divide,shift} functions are called by
86 // code generated by the compiler for out of bounds index expressions,
87 // out of bounds slice expressions, division by zero, and shift by negative.
88 // The panicdivide (again), panicoverflow, panicfloat, and panicmem
89 // functions are called by the signal handler when a signal occurs
90 // indicating the respective problem.
92 // Since panic{Index,Slice,shift} are never called directly, and
93 // since the runtime package should never have an out of bounds slice
94 // or array reference or negative shift, if we see those functions called from the
95 // runtime package we turn the panic into a throw. That will dump the
96 // entire runtime stack for easier debugging.
98 // The entry points called by the signal handler will be called from
99 // runtime.sigpanic, so we can't disallow calls from the runtime to
100 // these (they always look like they're called from the runtime).
101 // Hence, for these, we just check for clearly bad runtime conditions.
103 // failures in the comparisons for s[x], 0 <= x < y (y == len(s))
104 func goPanicIndex(x int, y int) {
105 panicCheck1(getcallerpc(), "index out of range")
106 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
108 func goPanicIndexU(x uint, y int) {
109 panicCheck1(getcallerpc(), "index out of range")
110 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
113 // failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
114 func goPanicSliceAlen(x int, y int) {
115 panicCheck1(getcallerpc(), "slice bounds out of range")
116 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
118 func goPanicSliceAlenU(x uint, y int) {
119 panicCheck1(getcallerpc(), "slice bounds out of range")
120 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
122 func goPanicSliceAcap(x int, y int) {
123 panicCheck1(getcallerpc(), "slice bounds out of range")
124 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
126 func goPanicSliceAcapU(x uint, y int) {
127 panicCheck1(getcallerpc(), "slice bounds out of range")
128 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
131 // failures in the comparisons for s[x:y], 0 <= x <= y
132 func goPanicSliceB(x int, y int) {
133 panicCheck1(getcallerpc(), "slice bounds out of range")
134 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
136 func goPanicSliceBU(x uint, y int) {
137 panicCheck1(getcallerpc(), "slice bounds out of range")
138 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
141 // failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
142 func goPanicSlice3Alen(x int, y int) {
143 panicCheck1(getcallerpc(), "slice bounds out of range")
144 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
146 func goPanicSlice3AlenU(x uint, y int) {
147 panicCheck1(getcallerpc(), "slice bounds out of range")
148 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
150 func goPanicSlice3Acap(x int, y int) {
151 panicCheck1(getcallerpc(), "slice bounds out of range")
152 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
154 func goPanicSlice3AcapU(x uint, y int) {
155 panicCheck1(getcallerpc(), "slice bounds out of range")
156 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
159 // failures in the comparisons for s[:x:y], 0 <= x <= y
160 func goPanicSlice3B(x int, y int) {
161 panicCheck1(getcallerpc(), "slice bounds out of range")
162 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
164 func goPanicSlice3BU(x uint, y int) {
165 panicCheck1(getcallerpc(), "slice bounds out of range")
166 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
169 // failures in the comparisons for s[x:y:], 0 <= x <= y
170 func goPanicSlice3C(x int, y int) {
171 panicCheck1(getcallerpc(), "slice bounds out of range")
172 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
174 func goPanicSlice3CU(x uint, y int) {
175 panicCheck1(getcallerpc(), "slice bounds out of range")
176 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
179 // failures in the conversion (*[x]T)s, 0 <= x <= y, x == cap(s)
180 func goPanicSliceConvert(x int, y int) {
181 panicCheck1(getcallerpc(), "slice length too short to convert to pointer to array")
182 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
185 var shiftError = error(errorString("negative shift amount"))
187 func panicshift() {
188 panicCheck1(getcallerpc(), "negative shift amount")
189 panic(shiftError)
192 var divideError = error(errorString("integer divide by zero"))
194 func panicdivide() {
195 panicCheck2("integer divide by zero")
196 panic(divideError)
199 var overflowError = error(errorString("integer overflow"))
201 func panicoverflow() {
202 panicCheck2("integer overflow")
203 panic(overflowError)
206 var floatError = error(errorString("floating point error"))
208 func panicfloat() {
209 panicCheck2("floating point error")
210 panic(floatError)
213 var memoryError = error(errorString("invalid memory address or nil pointer dereference"))
215 func panicmem() {
216 panicCheck2("invalid memory address or nil pointer dereference")
217 panic(memoryError)
220 func panicmemAddr(addr uintptr) {
221 panicCheck2("invalid memory address or nil pointer dereference")
222 panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
225 // deferproc creates a new deferred function.
226 // The compiler turns a defer statement into a call to this.
227 // frame points into the stack frame; it is used to determine which
228 // deferred functions are for the current stack frame, and whether we
229 // have already deferred functions for this frame.
230 // pfn is a C function pointer.
231 // arg is a value to pass to pfn.
232 func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) {
233 gp := getg()
234 d := newdefer()
235 if d._panic != nil {
236 throw("deferproc: d.panic != nil after newdefer")
238 d.link = gp._defer
239 gp._defer = d
240 d.frame = frame
241 d.panicStack = getg()._panic
242 d.pfn = pfn
243 d.arg = arg
244 d.retaddr = 0
245 d.makefunccanrecover = false
248 // deferprocStack queues a new deferred function with a defer record on the stack.
249 // The defer record, d, does not need to be initialized.
250 // Other arguments are the same as in deferproc.
251 //go:nosplit
252 func deferprocStack(d *_defer, frame *bool, pfn uintptr, arg unsafe.Pointer) {
253 gp := getg()
254 if gp.m.curg != gp {
255 // go code on the system stack can't defer
256 throw("defer on system stack")
258 d.pfn = pfn
259 d.retaddr = 0
260 d.makefunccanrecover = false
261 d.heap = false
262 // The lines below implement:
263 // d.frame = frame
264 // d.arg = arg
265 // d._panic = nil
266 // d.panicStack = gp._panic
267 // d.link = gp._defer
268 // But without write barriers. They are writes to the stack so they
269 // don't need a write barrier, and furthermore are to uninitialized
270 // memory, so they must not use a write barrier.
271 *(*uintptr)(unsafe.Pointer(&d.frame)) = uintptr(unsafe.Pointer(frame))
272 *(*uintptr)(unsafe.Pointer(&d.arg)) = uintptr(unsafe.Pointer(arg))
273 *(*uintptr)(unsafe.Pointer(&d._panic)) = 0
274 *(*uintptr)(unsafe.Pointer(&d.panicStack)) = uintptr(unsafe.Pointer(gp._panic))
275 *(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
277 gp._defer = d
280 // Allocate a Defer, usually using per-P pool.
281 // Each defer must be released with freedefer.
282 func newdefer() *_defer {
283 var d *_defer
284 mp := acquirem()
285 pp := mp.p.ptr()
286 if len(pp.deferpool) == 0 && sched.deferpool != nil {
287 lock(&sched.deferlock)
288 for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
289 d := sched.deferpool
290 sched.deferpool = d.link
291 d.link = nil
292 pp.deferpool = append(pp.deferpool, d)
294 unlock(&sched.deferlock)
296 if n := len(pp.deferpool); n > 0 {
297 d = pp.deferpool[n-1]
298 pp.deferpool[n-1] = nil
299 pp.deferpool = pp.deferpool[:n-1]
301 releasem(mp)
302 mp, pp = nil, nil
304 if d == nil {
305 // Allocate new defer.
306 d = new(_defer)
308 d.heap = true
309 return d
312 // Free the given defer.
313 // The defer cannot be used after this call.
315 // This is nosplit because the incoming defer is in a perilous state.
316 // It's not on any defer list, so stack copying won't adjust stack
317 // pointers in it (namely, d.link). Hence, if we were to copy the
318 // stack, d could then contain a stale pointer.
320 //go:nosplit
321 func freedefer(d *_defer) {
322 d.link = nil
323 // After this point we can copy the stack.
325 if d._panic != nil {
326 freedeferpanic()
328 if d.pfn != 0 {
329 freedeferfn()
331 if !d.heap {
332 return
334 mp := acquirem()
335 pp := mp.p.ptr()
336 if len(pp.deferpool) == cap(pp.deferpool) {
337 // Transfer half of local cache to the central cache.
339 // Take this slow path on the system stack so
340 // we don't grow freedefer's stack.
341 systemstack(func() {
342 var first, last *_defer
343 for len(pp.deferpool) > cap(pp.deferpool)/2 {
344 n := len(pp.deferpool)
345 d := pp.deferpool[n-1]
346 pp.deferpool[n-1] = nil
347 pp.deferpool = pp.deferpool[:n-1]
348 if first == nil {
349 first = d
350 } else {
351 last.link = d
353 last = d
355 lock(&sched.deferlock)
356 last.link = sched.deferpool
357 sched.deferpool = first
358 unlock(&sched.deferlock)
362 *d = _defer{}
364 pp.deferpool = append(pp.deferpool, d)
366 releasem(mp)
367 mp, pp = nil, nil
370 // Separate function so that it can split stack.
371 // Windows otherwise runs out of stack space.
372 func freedeferpanic() {
373 // _panic must be cleared before d is unlinked from gp.
374 throw("freedefer with d._panic != nil")
377 func freedeferfn() {
378 // fn must be cleared before d is unlinked from gp.
379 throw("freedefer with d.fn != nil")
382 // deferreturn is called to undefer the stack.
383 // The compiler inserts a call to this function as a finally clause
384 // wrapped around the body of any function that calls defer.
385 // The frame argument points to the stack frame of the function.
386 func deferreturn(frame *bool) {
387 gp := getg()
388 for gp._defer != nil && gp._defer.frame == frame {
389 d := gp._defer
390 pfn := d.pfn
391 d.pfn = 0
393 if pfn != 0 {
394 // This is rather awkward.
395 // The gc compiler does this using assembler
396 // code in jmpdefer.
397 var fn func(unsafe.Pointer)
398 *(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
399 gp.deferring = true
400 fn(d.arg)
401 gp.deferring = false
404 // If that was CgocallBackDone, it will have freed the
405 // defer for us, since we are no longer running as Go code.
406 if getg() == nil {
407 *frame = true
408 return
410 if gp.ranCgocallBackDone {
411 gp.ranCgocallBackDone = false
412 *frame = true
413 return
416 gp._defer = d.link
418 freedefer(d)
420 // Since we are executing a defer function now, we
421 // know that we are returning from the calling
422 // function. If the calling function, or one of its
423 // callees, panicked, then the defer functions would
424 // be executed by panic.
425 *frame = true
429 // __builtin_extract_return_addr is a GCC intrinsic that converts an
430 // address returned by __builtin_return_address(0) to a real address.
431 // On most architectures this is a nop.
432 //extern __builtin_extract_return_addr
433 func __builtin_extract_return_addr(uintptr) uintptr
435 // setdeferretaddr records the address to which the deferred function
436 // returns. This is check by canrecover. The frontend relies on this
437 // function returning false.
438 func setdeferretaddr(retaddr uintptr) bool {
439 gp := getg()
440 if gp._defer != nil {
441 gp._defer.retaddr = __builtin_extract_return_addr(retaddr)
443 return false
446 // checkdefer is called by exception handlers used when unwinding the
447 // stack after a recovered panic. The exception handler is simply
448 // checkdefer(frame)
449 // return;
450 // If we have not yet reached the frame we are looking for, we
451 // continue unwinding.
452 func checkdefer(frame *bool) {
453 gp := getg()
454 if gp == nil {
455 // We should never wind up here. Even if some other
456 // language throws an exception, the cgo code
457 // should ensure that g is set.
458 throw("no g in checkdefer")
459 } else if gp.isforeign {
460 // Some other language has thrown an exception.
461 // We need to run the local defer handlers.
462 // If they call recover, we stop unwinding here.
463 var p _panic
464 p.isforeign = true
465 p.link = gp._panic
466 gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
467 for {
468 d := gp._defer
469 if d == nil || d.frame != frame || d.pfn == 0 {
470 break
473 pfn := d.pfn
474 gp._defer = d.link
476 var fn func(unsafe.Pointer)
477 *(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
478 gp.deferring = true
479 fn(d.arg)
480 gp.deferring = false
482 freedefer(d)
484 if p.recovered {
485 // The recover function caught the panic
486 // thrown by some other language.
487 break
491 recovered := p.recovered
492 gp._panic = p.link
494 if recovered {
495 // Just return and continue executing Go code.
496 *frame = true
497 return
500 // We are panicking through this function.
501 *frame = false
502 } else if gp._defer != nil && gp._defer.pfn == 0 && gp._defer.frame == frame {
503 // This is the defer function that called recover.
504 // Simply return to stop the stack unwind, and let the
505 // Go code continue to execute.
506 d := gp._defer
507 gp._defer = d.link
508 freedefer(d)
510 // We are returning from this function.
511 *frame = true
513 return
516 // This is some other defer function. It was already run by
517 // the call to panic, or just above. Rethrow the exception.
518 rethrowException()
519 throw("rethrowException returned")
522 // unwindStack starts unwinding the stack for a panic. We unwind
523 // function calls until we reach the one which used a defer function
524 // which called recover. Each function which uses a defer statement
525 // will have an exception handler, as shown above for checkdefer.
526 func unwindStack() {
527 // Allocate the exception type used by the unwind ABI.
528 // It would be nice to define it in runtime_sysinfo.go,
529 // but current definitions don't work because the required
530 // alignment is larger than can be represented in Go.
531 // The type never contains any Go pointers.
532 size := unwindExceptionSize()
533 usize := uintptr(unsafe.Sizeof(uintptr(0)))
534 c := (size + usize - 1) / usize
535 s := make([]uintptr, c)
536 getg().exception = unsafe.Pointer(&s[0])
537 throwException()
540 // Goexit terminates the goroutine that calls it. No other goroutine is affected.
541 // Goexit runs all deferred calls before terminating the goroutine. Because Goexit
542 // is not a panic, any recover calls in those deferred functions will return nil.
544 // Calling Goexit from the main goroutine terminates that goroutine
545 // without func main returning. Since func main has not returned,
546 // the program continues execution of other goroutines.
547 // If all other goroutines exit, the program crashes.
548 func Goexit() {
549 // Run all deferred functions for the current goroutine.
550 // This code is similar to gopanic, see that implementation
551 // for detailed comments.
552 gp := getg()
553 gp.goexiting = true
555 // Create a panic object for Goexit, so we can recognize when it might be
556 // bypassed by a recover().
557 var p _panic
558 p.goexit = true
559 p.link = gp._panic
560 gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
562 for {
563 d := gp._defer
564 if d == nil {
565 break
568 pfn := d.pfn
569 if pfn == 0 {
570 if d._panic != nil {
571 d._panic.aborted = true
572 d._panic = nil
574 gp._defer = d.link
575 freedefer(d)
576 continue
578 d.pfn = 0
580 var fn func(unsafe.Pointer)
581 *(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
582 gp.deferring = true
583 fn(d.arg)
584 gp.deferring = false
586 if gp._defer != d {
587 throw("bad defer entry in Goexit")
589 d._panic = nil
590 gp._defer = d.link
591 freedefer(d)
592 // Note: we ignore recovers here because Goexit isn't a panic
594 gp.goexiting = false
595 goexit1()
598 // Call all Error and String methods before freezing the world.
599 // Used when crashing with panicking.
600 func preprintpanics(p *_panic) {
601 defer func() {
602 if recover() != nil {
603 throw("panic while printing panic value")
606 for p != nil {
607 switch v := p.arg.(type) {
608 case error:
609 p.arg = v.Error()
610 case stringer:
611 p.arg = v.String()
613 p = p.link
617 // Print all currently active panics. Used when crashing.
618 // Should only be called after preprintpanics.
619 func printpanics(p *_panic) {
620 if p.link != nil {
621 printpanics(p.link)
622 if !p.link.goexit {
623 print("\t")
626 if p.goexit {
627 return
629 print("panic: ")
630 printany(p.arg)
631 if p.recovered {
632 print(" [recovered]")
634 print("\n")
637 // The implementation of the predeclared function panic.
638 func gopanic(e any) {
639 gp := getg()
640 if gp.m.curg != gp {
641 print("panic: ")
642 printany(e)
643 print("\n")
644 throw("panic on system stack")
647 if gp.m.mallocing != 0 {
648 print("panic: ")
649 printany(e)
650 print("\n")
651 throw("panic during malloc")
653 if gp.m.preemptoff != "" {
654 print("panic: ")
655 printany(e)
656 print("\n")
657 print("preempt off reason: ")
658 print(gp.m.preemptoff)
659 print("\n")
660 throw("panic during preemptoff")
662 if gp.m.locks != 0 {
663 print("panic: ")
664 printany(e)
665 print("\n")
666 throw("panic holding locks")
669 // The gc compiler allocates this new _panic struct on the
670 // stack. We can't do that, because when a deferred function
671 // recovers the panic we unwind the stack. We unlink this
672 // entry before unwinding the stack, but that doesn't help in
673 // the case where we panic, a deferred function recovers and
674 // then panics itself, that panic is in turn recovered, and
675 // unwinds the stack past this stack frame.
677 p := &_panic{
678 arg: e,
679 link: gp._panic,
681 gp._panic = p
683 atomic.Xadd(&runningPanicDefers, 1)
685 for {
686 d := gp._defer
687 if d == nil {
688 break
691 pfn := d.pfn
693 // If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
694 // take defer off list. The earlier panic or Goexit will not continue running.
695 if pfn == 0 {
696 if d._panic != nil {
697 d._panic.aborted = true
699 d._panic = nil
700 gp._defer = d.link
701 freedefer(d)
702 continue
704 d.pfn = 0
706 // Record the panic that is running the defer.
707 // If there is a new panic during the deferred call, that panic
708 // will find d in the list and will mark d._panic (this panic) aborted.
709 d._panic = p
711 var fn func(unsafe.Pointer)
712 *(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
713 gp.deferring = true
714 fn(d.arg)
715 gp.deferring = false
717 if gp._defer != d {
718 throw("bad defer entry in panic")
720 d._panic = nil
722 if p.recovered {
723 gp._panic = p.link
724 if gp._panic != nil && gp._panic.goexit && gp._panic.aborted {
725 Goexit()
726 throw("Goexit returned")
728 atomic.Xadd(&runningPanicDefers, -1)
730 // Aborted panics are marked but remain on the g.panic list.
731 // Remove them from the list.
732 for gp._panic != nil && gp._panic.aborted {
733 gp._panic = gp._panic.link
735 if gp._panic == nil { // must be done with signal
736 gp.sig = 0
739 if gp._panic != nil && gp._panic.goexit {
740 Goexit()
741 throw("Goexit returned")
744 // Unwind the stack by throwing an exception.
745 // The compiler has arranged to create
746 // exception handlers in each function
747 // that uses a defer statement. These
748 // exception handlers will check whether
749 // the entry on the top of the defer stack
750 // is from the current function. If it is,
751 // we have unwound the stack far enough.
752 unwindStack()
754 throw("unwindStack returned")
757 // Because we executed that defer function by a panic,
758 // and it did not call recover, we know that we are
759 // not returning from the calling function--we are
760 // panicking through it.
761 *d.frame = false
763 // Deferred function did not panic. Remove d.
764 // In the p.recovered case, d will be removed by checkdefer.
765 gp._defer = d.link
767 freedefer(d)
770 // ran out of deferred calls - old-school panic now
771 // Because it is unsafe to call arbitrary user code after freezing
772 // the world, we call preprintpanics to invoke all necessary Error
773 // and String methods to prepare the panic strings before startpanic.
774 preprintpanics(gp._panic)
776 fatalpanic(gp._panic) // should not return
777 *(*int)(nil) = 0 // not reached
780 // currentDefer returns the top of the defer stack if it can be recovered.
781 // Otherwise it returns nil.
782 func currentDefer() *_defer {
783 gp := getg()
784 d := gp._defer
785 if d == nil {
786 return nil
789 // The panic that would be recovered is the one on the top of
790 // the panic stack. We do not want to recover it if that panic
791 // was on the top of the panic stack when this function was
792 // deferred.
793 if d.panicStack == gp._panic {
794 return nil
797 // The deferred thunk will call setdeferretaddr. If this has
798 // not happened, then we have not been called via defer, and
799 // we can not recover.
800 if d.retaddr == 0 {
801 return nil
804 return d
807 // canrecover is called by a thunk to see if the real function would
808 // be permitted to recover a panic value. Recovering a value is
809 // permitted if the thunk was called directly by defer. retaddr is the
810 // return address of the function that is calling canrecover--that is,
811 // the thunk.
812 func canrecover(retaddr uintptr) bool {
813 d := currentDefer()
814 if d == nil {
815 return false
818 ret := __builtin_extract_return_addr(retaddr)
819 dret := d.retaddr
820 if ret <= dret && ret+16 >= dret {
821 return true
824 // On some systems, in some cases, the return address does not
825 // work reliably. See http://gcc.gnu.org/PR60406. If we are
826 // permitted to call recover, the call stack will look like this:
827 // runtime.gopanic, runtime.deferreturn, etc.
828 // thunk to call deferred function (calls __go_set_defer_retaddr)
829 // function that calls __go_can_recover (passing return address)
830 // runtime.canrecover
831 // Calling callers will skip the thunks. So if our caller's
832 // caller starts with "runtime.", then we are permitted to
833 // call recover.
834 var locs [16]location
835 if callers(1, locs[:2]) < 2 {
836 return false
839 name := locs[1].function
840 if hasPrefix(name, "runtime.") {
841 return true
844 // If the function calling recover was created by reflect.MakeFunc,
845 // then makefuncfficanrecover will have set makefunccanrecover.
846 if !d.makefunccanrecover {
847 return false
850 // We look up the stack, ignoring libffi functions and
851 // functions in the reflect package, until we find
852 // reflect.makeFuncStub or reflect.ffi_callback called by FFI
853 // functions. Then we check the caller of that function.
855 n := callers(2, locs[:])
856 foundFFICallback := false
857 i := 0
858 for ; i < n; i++ {
859 name = locs[i].function
860 if name == "" {
861 // No function name means this caller isn't Go code.
862 // Assume that this is libffi.
863 continue
866 // Ignore function in libffi.
867 if hasPrefix(name, "ffi_") {
868 continue
871 if foundFFICallback {
872 break
875 if name == "reflect.ffi_callback" {
876 foundFFICallback = true
877 continue
880 // Ignore other functions in the reflect package.
881 if hasPrefix(name, "reflect.") || hasPrefix(name, ".1reflect.") {
882 continue
885 // We should now be looking at the real caller.
886 break
889 if i < n {
890 name = locs[i].function
891 if hasPrefix(name, "runtime.") {
892 return true
896 return false
899 // This function is called when code is about to enter a function
900 // created by the libffi version of reflect.MakeFunc. This function is
901 // passed the names of the callers of the libffi code that called the
902 // stub. It uses them to decide whether it is permitted to call
903 // recover, and sets d.makefunccanrecover so that gorecover can make
904 // the same decision.
905 func makefuncfficanrecover(loc []location) {
906 d := currentDefer()
907 if d == nil {
908 return
911 // If we are already in a call stack of MakeFunc functions,
912 // there is nothing we can usefully check here.
913 if d.makefunccanrecover {
914 return
917 // loc starts with the caller of our caller. That will be a thunk.
918 // If its caller was a function function, then it was called
919 // directly by defer.
920 if len(loc) < 2 {
921 return
924 name := loc[1].function
925 if hasPrefix(name, "runtime.") {
926 d.makefunccanrecover = true
930 // makefuncreturning is called when code is about to exit a function
931 // created by reflect.MakeFunc. It is called by the function stub used
932 // by reflect.MakeFunc. It clears the makefunccanrecover field. It's
933 // OK to always clear this field, because canrecover will only be
934 // called by a stub created for a function that calls recover. That
935 // stub will not call a function created by reflect.MakeFunc, so by
936 // the time we get here any caller higher up on the call stack no
937 // longer needs the information.
938 func makefuncreturning() {
939 d := getg()._defer
940 if d != nil {
941 d.makefunccanrecover = false
945 // The implementation of the predeclared function recover.
946 func gorecover() interface{} {
947 gp := getg()
948 p := gp._panic
949 if p != nil && !p.goexit && !p.recovered {
950 p.recovered = true
951 return p.arg
953 return nil
956 // deferredrecover is called when a call to recover is deferred. That
957 // is, something like
958 // defer recover()
960 // We need to handle this specially. In gc, the recover function
961 // looks up the stack frame. In particular, that means that a deferred
962 // recover will not recover a panic thrown in the same function that
963 // defers the recover. It will only recover a panic thrown in a
964 // function that defers the deferred call to recover.
966 // In other words:
968 // func f1() {
969 // defer recover() // does not stop panic
970 // panic(0)
971 // }
973 // func f2() {
974 // defer func() {
975 // defer recover() // stops panic(0)
976 // }()
977 // panic(0)
978 // }
980 // func f3() {
981 // defer func() {
982 // defer recover() // does not stop panic
983 // panic(0)
984 // }()
985 // panic(1)
986 // }
988 // func f4() {
989 // defer func() {
990 // defer func() {
991 // defer recover() // stops panic(0)
992 // }()
993 // panic(0)
994 // }()
995 // panic(1)
996 // }
998 // The interesting case here is f3. As can be seen from f2, the
999 // deferred recover could pick up panic(1). However, this does not
1000 // happen because it is blocked by the panic(0).
1002 // When a function calls recover, then when we invoke it we pass a
1003 // hidden parameter indicating whether it should recover something.
1004 // This parameter is set based on whether the function is being
1005 // invoked directly from defer. The parameter winds up determining
1006 // whether __go_recover or __go_deferred_recover is called at all.
1008 // In the case of a deferred recover, the hidden parameter that
1009 // controls the call is actually the one set up for the function that
1010 // runs the defer recover() statement. That is the right thing in all
1011 // the cases above except for f3. In f3 the function is permitted to
1012 // call recover, but the deferred recover call is not. We address that
1013 // here by checking for that specific case before calling recover. If
1014 // this function was deferred when there is already a panic on the
1015 // panic stack, then we can only recover that panic, not any other.
1017 // Note that we can get away with using a special function here
1018 // because you are not permitted to take the address of a predeclared
1019 // function like recover.
1020 func deferredrecover() interface{} {
1021 gp := getg()
1022 if gp._defer == nil || gp._defer.panicStack != gp._panic {
1023 return nil
1025 return gorecover()
1028 //go:linkname sync_throw sync.throw
1029 func sync_throw(s string) {
1030 throw(s)
1033 //go:nosplit
1034 func throw(s string) {
1035 // Everything throw does should be recursively nosplit so it
1036 // can be called even when it's unsafe to grow the stack.
1037 systemstack(func() {
1038 print("fatal error: ", s, "\n")
1040 gp := getg()
1041 if gp.m.throwing == 0 {
1042 gp.m.throwing = 1
1044 fatalthrow()
1045 *(*int)(nil) = 0 // not reached
// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically.
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex
// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
// It is nosplit, and captures its caller's pc/sp first thing, so that
// the traceback starts at the frame that called throw.
//
//go:nosplit
func fatalthrow() {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()

	// Switch the runtime into dying mode (freeze the world, take
	// paniclk) before printing anything.
	startpanic_m()

	if dopanic_m(gp, pc, sp) {
		// GOTRACEBACK=crash: raise a signal instead of exiting
		// so a core dump / debugger can take over.
		crash()
	}

	exit(2)

	*(*int)(nil) = 0 // not reached
}
// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool

	if startpanic_m() && msgs != nil {
		// There were panic messages and startpanic_m
		// says it's okay to try to print them.

		// startpanic_m set panicking, which will
		// block main from exiting, so now OK to
		// decrement runningPanicDefers.
		atomic.Xadd(&runningPanicDefers, -1)

		printpanics(msgs)
	}

	docrash = dopanic_m(gp, pc, sp)

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	// Exit on the system stack so the exit happens with a
	// predictable amount of remaining stack.
	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}

	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if _g_.m.locks < 0 {
		_g_.m.locks = 1
	}

	// m.dying tracks how far we got on previous attempts to die,
	// so that a panic during panic handling degrades gracefully.
	switch _g_.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		_g_.m.dying = 1
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}
// didothers records whether tracebackothers has already run, so that
// concurrent or nested panics don't dump every goroutine twice.
var didothers bool

// deadlock is locked twice (never unlocked) by a panicking M that has
// lost the race to print, parking it forever; see dopanic_m.
var deadlock mutex

// dopanic_m prints the signal info (if any) and stack traces for a
// dying program, honoring the GOTRACEBACK level. It reports whether
// the process should crash (e.g. GOTRACEBACK=crash) rather than exit.
// Caller must hold paniclk (taken by startpanic_m); it is released here.
func dopanic_m(gp *g, pc, sp uintptr) bool {
	// If the panic originated from a signal, print its details first.
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			// Panic not on the user goroutine: show all goroutines.
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(0)
		} else if level >= 2 || _g_.m.throwing > 0 {
			// Runtime (g0) stacks are only printed at higher
			// verbosity or for a throw.
			print("\nruntime stack:\n")
			traceback(0)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}
1230 // canpanic returns false if a signal should throw instead of
1231 // panicking.
1233 //go:nosplit
1234 func canpanic(gp *g) bool {
1235 // Note that g is m->gsignal, different from gp.
1236 // Note also that g->m can change at preemption, so m can go stale
1237 // if this function ever makes a function call.
1238 _g_ := getg()
1239 mp := _g_.m
1241 // Is it okay for gp to panic instead of crashing the program?
1242 // Yes, as long as it is running Go code, not runtime code,
1243 // and not stuck in a system call.
1244 if gp == nil || gp != mp.curg {
1245 return false
1247 if mp.locks != 0 || mp.mallocing != 0 || mp.throwing != 0 || mp.preemptoff != "" || mp.dying != 0 {
1248 return false
1250 status := readgstatus(gp)
1251 if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
1252 return false
1254 return true
// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
// This (gccgo) implementation unconditionally reports false.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	return false
}