sync/atomic, runtime/internal/atomic: don't assume reads from 0 fail
[official-gcc.git] / libgo / go / runtime / panic.go
blobc39a58d0c4b22d75378058808ba4c72a6446b70b
1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime
7 import (
8 "runtime/internal/atomic"
9 "unsafe"
// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.

//go:linkname deferproc runtime.deferproc
//go:linkname deferreturn runtime.deferreturn
//go:linkname setdeferretaddr runtime.setdeferretaddr
//go:linkname checkdefer runtime.checkdefer
//go:linkname gopanic runtime.gopanic
//go:linkname canrecover runtime.canrecover
//go:linkname makefuncfficanrecover runtime.makefuncfficanrecover
//go:linkname makefuncreturning runtime.makefuncreturning
//go:linkname gorecover runtime.gorecover
//go:linkname deferredrecover runtime.deferredrecover
//go:linkname panicmem runtime.panicmem
// Temporary for C code to call:
//go:linkname throw runtime.throw
29 // Calling panic with one of the errors below will call errorString.Error
30 // which will call mallocgc to concatenate strings. That will fail if
31 // malloc is locked, causing a confusing error message. Throw a better
32 // error message instead.
33 func panicCheckMalloc(err error) {
34 gp := getg()
35 if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
36 throw(string(err.(errorString)))
40 var indexError = error(errorString("index out of range"))
42 func panicindex() {
43 panicCheckMalloc(indexError)
44 panic(indexError)
47 var sliceError = error(errorString("slice bounds out of range"))
49 func panicslice() {
50 panicCheckMalloc(sliceError)
51 panic(sliceError)
54 var divideError = error(errorString("integer divide by zero"))
56 func panicdivide() {
57 panicCheckMalloc(divideError)
58 panic(divideError)
61 var overflowError = error(errorString("integer overflow"))
63 func panicoverflow() {
64 panicCheckMalloc(overflowError)
65 panic(overflowError)
68 var floatError = error(errorString("floating point error"))
70 func panicfloat() {
71 panicCheckMalloc(floatError)
72 panic(floatError)
75 var memoryError = error(errorString("invalid memory address or nil pointer dereference"))
77 func panicmem() {
78 panicCheckMalloc(memoryError)
79 panic(memoryError)
82 func throwinit() {
83 throw("recursive call during initialization - linker skew")
86 // deferproc creates a new deferred function.
87 // The compiler turns a defer statement into a call to this.
88 // frame points into the stack frame; it is used to determine which
89 // deferred functions are for the current stack frame, and whether we
90 // have already deferred functions for this frame.
91 // pfn is a C function pointer.
92 // arg is a value to pass to pfn.
93 func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) {
94 d := newdefer()
95 if d._panic != nil {
96 throw("deferproc: d.panic != nil after newdefer")
98 d.frame = frame
99 d.panicStack = getg()._panic
100 d.pfn = pfn
101 d.arg = arg
102 d.retaddr = 0
103 d.makefunccanrecover = false
106 // Allocate a Defer, usually using per-P pool.
107 // Each defer must be released with freedefer.
108 func newdefer() *_defer {
109 var d *_defer
110 gp := getg()
111 pp := gp.m.p.ptr()
112 if len(pp.deferpool) == 0 && sched.deferpool != nil {
113 systemstack(func() {
114 lock(&sched.deferlock)
115 for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
116 d := sched.deferpool
117 sched.deferpool = d.link
118 d.link = nil
119 pp.deferpool = append(pp.deferpool, d)
121 unlock(&sched.deferlock)
124 if n := len(pp.deferpool); n > 0 {
125 d = pp.deferpool[n-1]
126 pp.deferpool[n-1] = nil
127 pp.deferpool = pp.deferpool[:n-1]
129 if d == nil {
130 systemstack(func() {
131 d = new(_defer)
134 d.link = gp._defer
135 gp._defer = d
136 return d
139 // Free the given defer.
140 // The defer cannot be used after this call.
142 // This must not grow the stack because there may be a frame without a
143 // stack map when this is called.
145 //go:nosplit
146 func freedefer(d *_defer) {
147 pp := getg().m.p.ptr()
148 if len(pp.deferpool) == cap(pp.deferpool) {
149 // Transfer half of local cache to the central cache.
151 // Take this slow path on the system stack so
152 // we don't grow freedefer's stack.
153 systemstack(func() {
154 var first, last *_defer
155 for len(pp.deferpool) > cap(pp.deferpool)/2 {
156 n := len(pp.deferpool)
157 d := pp.deferpool[n-1]
158 pp.deferpool[n-1] = nil
159 pp.deferpool = pp.deferpool[:n-1]
160 if first == nil {
161 first = d
162 } else {
163 last.link = d
165 last = d
167 lock(&sched.deferlock)
168 last.link = sched.deferpool
169 sched.deferpool = first
170 unlock(&sched.deferlock)
173 *d = _defer{}
174 pp.deferpool = append(pp.deferpool, d)
177 // deferreturn is called to undefer the stack.
178 // The compiler inserts a call to this function as a finally clause
179 // wrapped around the body of any function that calls defer.
180 // The frame argument points to the stack frame of the function.
181 func deferreturn(frame *bool) {
182 gp := getg()
183 for gp._defer != nil && gp._defer.frame == frame {
184 d := gp._defer
185 pfn := d.pfn
186 d.pfn = 0
188 if pfn != 0 {
189 // This is rather awkward.
190 // The gc compiler does this using assembler
191 // code in jmpdefer.
192 var fn func(unsafe.Pointer)
193 *(*uintptr)(unsafe.Pointer(&fn)) = uintptr(unsafe.Pointer(&pfn))
194 fn(d.arg)
197 // If we are returning from a Go function called by a
198 // C function running in a C thread, g may now be nil,
199 // in which case CgocallBackDone will have cleared _defer.
200 // In that case some other goroutine may already be using gp.
201 if getg() == nil {
202 *frame = true
203 return
206 gp._defer = d.link
208 freedefer(d)
210 // Since we are executing a defer function now, we
211 // know that we are returning from the calling
212 // function. If the calling function, or one of its
213 // callees, panicked, then the defer functions would
214 // be executed by panic.
215 *frame = true
219 // __builtin_extract_return_addr is a GCC intrinsic that converts an
220 // address returned by __builtin_return_address(0) to a real address.
221 // On most architectures this is a nop.
222 //extern __builtin_extract_return_addr
223 func __builtin_extract_return_addr(uintptr) uintptr
225 // setdeferretaddr records the address to which the deferred function
226 // returns. This is check by canrecover. The frontend relies on this
227 // function returning false.
228 func setdeferretaddr(retaddr uintptr) bool {
229 gp := getg()
230 if gp._defer != nil {
231 gp._defer.retaddr = __builtin_extract_return_addr(retaddr)
233 return false
236 // checkdefer is called by exception handlers used when unwinding the
237 // stack after a recovered panic. The exception handler is simply
238 // checkdefer(frame)
239 // return;
240 // If we have not yet reached the frame we are looking for, we
241 // continue unwinding.
242 func checkdefer(frame *bool) {
243 gp := getg()
244 if gp == nil {
245 // We should never wind up here. Even if some other
246 // language throws an exception, the cgo code
247 // should ensure that g is set.
248 throw("no g in checkdefer")
249 } else if gp.isforeign {
250 // Some other language has thrown an exception.
251 // We need to run the local defer handlers.
252 // If they call recover, we stop unwinding here.
253 var p _panic
254 p.isforeign = true
255 p.link = gp._panic
256 gp._panic = &p
257 for {
258 d := gp._defer
259 if d == nil || d.frame != frame || d.pfn == 0 {
260 break
263 pfn := d.pfn
264 gp._defer = d.link
266 var fn func(unsafe.Pointer)
267 *(*uintptr)(unsafe.Pointer(&fn)) = uintptr(unsafe.Pointer(&pfn))
268 fn(d.arg)
270 freedefer(d)
272 if p.recovered {
273 // The recover function caught the panic
274 // thrown by some other language.
275 break
279 recovered := p.recovered
280 gp._panic = p.link
282 if recovered {
283 // Just return and continue executing Go code.
284 *frame = true
285 return
288 // We are panicking through this function.
289 *frame = false
290 } else if gp._defer != nil && gp._defer.pfn == 0 && gp._defer.frame == frame {
291 // This is the defer function that called recover.
292 // Simply return to stop the stack unwind, and let the
293 // Go code continue to execute.
294 d := gp._defer
295 gp._defer = d.link
296 freedefer(d)
298 // We are returning from this function.
299 *frame = true
301 return
304 // This is some other defer function. It was already run by
305 // the call to panic, or just above. Rethrow the exception.
306 rethrowException()
307 throw("rethrowException returned")
310 // unwindStack starts unwinding the stack for a panic. We unwind
311 // function calls until we reach the one which used a defer function
312 // which called recover. Each function which uses a defer statement
313 // will have an exception handler, as shown above for checkdefer.
314 func unwindStack() {
315 // Allocate the exception type used by the unwind ABI.
316 // It would be nice to define it in runtime_sysinfo.go,
317 // but current definitions don't work because the required
318 // alignment is larger than can be represented in Go.
319 // The type never contains any Go pointers.
320 size := unwindExceptionSize()
321 usize := uintptr(unsafe.Sizeof(uintptr(0)))
322 c := (size + usize - 1) / usize
323 s := make([]uintptr, c)
324 getg().exception = unsafe.Pointer(&s[0])
325 throwException()
328 // Goexit terminates the goroutine that calls it. No other goroutine is affected.
329 // Goexit runs all deferred calls before terminating the goroutine. Because Goexit
330 // is not panic, however, any recover calls in those deferred functions will return nil.
332 // Calling Goexit from the main goroutine terminates that goroutine
333 // without func main returning. Since func main has not returned,
334 // the program continues execution of other goroutines.
335 // If all other goroutines exit, the program crashes.
336 func Goexit() {
337 // Run all deferred functions for the current goroutine.
338 // This code is similar to gopanic, see that implementation
339 // for detailed comments.
340 gp := getg()
341 for {
342 d := gp._defer
343 if d == nil {
344 break
347 pfn := d.pfn
348 if pfn == 0 {
349 if d._panic != nil {
350 d._panic.aborted = true
351 d._panic = nil
353 gp._defer = d.link
354 freedefer(d)
355 continue
357 d.pfn = 0
359 var fn func(unsafe.Pointer)
360 *(*uintptr)(unsafe.Pointer(&fn)) = uintptr(unsafe.Pointer(&pfn))
361 fn(d.arg)
363 if gp._defer != d {
364 throw("bad defer entry in Goexit")
366 d._panic = nil
367 gp._defer = d.link
368 freedefer(d)
369 // Note: we ignore recovers here because Goexit isn't a panic
371 goexit1()
374 // Call all Error and String methods before freezing the world.
375 // Used when crashing with panicking.
376 // This must match types handled by printany.
377 func preprintpanics(p *_panic) {
378 defer func() {
379 if recover() != nil {
380 throw("panic while printing panic value")
383 for p != nil {
384 switch v := p.arg.(type) {
385 case error:
386 p.arg = v.Error()
387 case stringer:
388 p.arg = v.String()
390 p = p.link
394 // Print all currently active panics. Used when crashing.
395 func printpanics(p *_panic) {
396 if p.link != nil {
397 printpanics(p.link)
398 print("\t")
400 print("panic: ")
401 printany(p.arg)
402 if p.recovered {
403 print(" [recovered]")
405 print("\n")
408 // The implementation of the predeclared function panic.
409 func gopanic(e interface{}) {
410 gp := getg()
411 if gp.m.curg != gp {
412 print("panic: ")
413 printany(e)
414 print("\n")
415 throw("panic on system stack")
418 if gp.m.mallocing != 0 {
419 print("panic: ")
420 printany(e)
421 print("\n")
422 throw("panic during malloc")
424 if gp.m.preemptoff != "" {
425 print("panic: ")
426 printany(e)
427 print("\n")
428 print("preempt off reason: ")
429 print(gp.m.preemptoff)
430 print("\n")
431 throw("panic during preemptoff")
433 if gp.m.locks != 0 {
434 print("panic: ")
435 printany(e)
436 print("\n")
437 throw("panic holding locks")
440 // The gc compiler allocates this new _panic struct on the
441 // stack. We can't do that, because when a deferred function
442 // recovers the panic we unwind the stack. We unlink this
443 // entry before unwinding the stack, but that doesn't help in
444 // the case where we panic, a deferred function recovers and
445 // then panics itself, that panic is in turn recovered, and
446 // unwinds the stack past this stack frame.
448 p := &_panic{
449 arg: e,
450 link: gp._panic,
452 gp._panic = p
454 atomic.Xadd(&runningPanicDefers, 1)
456 for {
457 d := gp._defer
458 if d == nil {
459 break
462 pfn := d.pfn
464 // If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
465 // take defer off list. The earlier panic or Goexit will not continue running.
466 if pfn == 0 {
467 if d._panic != nil {
468 d._panic.aborted = true
470 d._panic = nil
471 gp._defer = d.link
472 freedefer(d)
473 continue
475 d.pfn = 0
477 // Record the panic that is running the defer.
478 // If there is a new panic during the deferred call, that panic
479 // will find d in the list and will mark d._panic (this panic) aborted.
480 d._panic = p
482 var fn func(unsafe.Pointer)
483 *(*uintptr)(unsafe.Pointer(&fn)) = uintptr(unsafe.Pointer(&pfn))
484 fn(d.arg)
486 if gp._defer != d {
487 throw("bad defer entry in panic")
489 d._panic = nil
491 if p.recovered {
492 atomic.Xadd(&runningPanicDefers, -1)
494 gp._panic = p.link
496 // Aborted panics are marked but remain on the g.panic list.
497 // Remove them from the list.
498 for gp._panic != nil && gp._panic.aborted {
499 gp._panic = gp._panic.link
501 if gp._panic == nil { // must be done with signal
502 gp.sig = 0
505 // Unwind the stack by throwing an exception.
506 // The compiler has arranged to create
507 // exception handlers in each function
508 // that uses a defer statement. These
509 // exception handlers will check whether
510 // the entry on the top of the defer stack
511 // is from the current function. If it is,
512 // we have unwound the stack far enough.
513 unwindStack()
515 throw("unwindStack returned")
518 // Because we executed that defer function by a panic,
519 // and it did not call recover, we know that we are
520 // not returning from the calling function--we are
521 // panicking through it.
522 *d.frame = false
524 // Deferred function did not panic. Remove d.
525 // In the p.recovered case, d will be removed by checkdefer.
526 gp._defer = d.link
528 freedefer(d)
531 // ran out of deferred calls - old-school panic now
532 // Because it is unsafe to call arbitrary user code after freezing
533 // the world, we call preprintpanics to invoke all necessary Error
534 // and String methods to prepare the panic strings before startpanic.
535 preprintpanics(gp._panic)
536 startpanic()
538 // startpanic set panicking, which will block main from exiting,
539 // so now OK to decrement runningPanicDefers.
540 atomic.Xadd(&runningPanicDefers, -1)
542 printpanics(gp._panic)
543 dopanic(0) // should not return
544 *(*int)(nil) = 0 // not reached
547 // currentDefer returns the top of the defer stack if it can be recovered.
548 // Otherwise it returns nil.
549 func currentDefer() *_defer {
550 gp := getg()
551 d := gp._defer
552 if d == nil {
553 return nil
556 // The panic that would be recovered is the one on the top of
557 // the panic stack. We do not want to recover it if that panic
558 // was on the top of the panic stack when this function was
559 // deferred.
560 if d.panicStack == gp._panic {
561 return nil
564 // The deferred thunk will call setdeferretaddr. If this has
565 // not happened, then we have not been called via defer, and
566 // we can not recover.
567 if d.retaddr == 0 {
568 return nil
571 return d
574 // canrecover is called by a thunk to see if the real function would
575 // be permitted to recover a panic value. Recovering a value is
576 // permitted if the thunk was called directly by defer. retaddr is the
577 // return address of the function that is calling canrecover--that is,
578 // the thunk.
579 func canrecover(retaddr uintptr) bool {
580 d := currentDefer()
581 if d == nil {
582 return false
585 ret := __builtin_extract_return_addr(retaddr)
586 dret := d.retaddr
587 if ret <= dret && ret+16 >= dret {
588 return true
591 // On some systems, in some cases, the return address does not
592 // work reliably. See http://gcc.gnu.org/PR60406. If we are
593 // permitted to call recover, the call stack will look like this:
594 // runtime.gopanic, runtime.deferreturn, etc.
595 // thunk to call deferred function (calls __go_set_defer_retaddr)
596 // function that calls __go_can_recover (passing return address)
597 // runtime.canrecover
598 // Calling callers will skip the thunks. So if our caller's
599 // caller starts with "runtime.", then we are permitted to
600 // call recover.
601 var locs [16]location
602 if callers(2, locs[:2]) < 2 {
603 return false
606 name := locs[1].function
607 if hasprefix(name, "runtime.") {
608 return true
611 // If the function calling recover was created by reflect.MakeFunc,
612 // then makefuncfficanrecover will have set makefunccanrecover.
613 if !d.makefunccanrecover {
614 return false
617 // We look up the stack, ignoring libffi functions and
618 // functions in the reflect package, until we find
619 // reflect.makeFuncStub or reflect.ffi_callback called by FFI
620 // functions. Then we check the caller of that function.
622 n := callers(3, locs[:])
623 foundFFICallback := false
624 i := 0
625 for ; i < n; i++ {
626 name = locs[i].function
627 if name == "" {
628 // No function name means this caller isn't Go code.
629 // Assume that this is libffi.
630 continue
633 // Ignore function in libffi.
634 if hasprefix(name, "ffi_") {
635 continue
638 if foundFFICallback {
639 break
642 if name == "reflect.ffi_callback" {
643 foundFFICallback = true
644 continue
647 // Ignore other functions in the reflect package.
648 if hasprefix(name, "reflect.") {
649 continue
652 // We should now be looking at the real caller.
653 break
656 if i < n {
657 name = locs[i].function
658 if hasprefix(name, "runtime.") {
659 return true
663 return false
666 // This function is called when code is about to enter a function
667 // created by the libffi version of reflect.MakeFunc. This function is
668 // passed the names of the callers of the libffi code that called the
669 // stub. It uses them to decide whether it is permitted to call
670 // recover, and sets d.makefunccanrecover so that gorecover can make
671 // the same decision.
672 func makefuncfficanrecover(loc []location) {
673 d := currentDefer()
674 if d == nil {
675 return
678 // If we are already in a call stack of MakeFunc functions,
679 // there is nothing we can usefully check here.
680 if d.makefunccanrecover {
681 return
684 // loc starts with the caller of our caller. That will be a thunk.
685 // If its caller was a function function, then it was called
686 // directly by defer.
687 if len(loc) < 2 {
688 return
691 name := loc[1].function
692 if hasprefix(name, "runtime.") {
693 d.makefunccanrecover = true
697 // makefuncreturning is called when code is about to exit a function
698 // created by reflect.MakeFunc. It is called by the function stub used
699 // by reflect.MakeFunc. It clears the makefunccanrecover field. It's
700 // OK to always clear this field, because canrecover will only be
701 // called by a stub created for a function that calls recover. That
702 // stub will not call a function created by reflect.MakeFunc, so by
703 // the time we get here any caller higher up on the call stack no
704 // longer needs the information.
705 func makefuncreturning() {
706 d := getg()._defer
707 if d != nil {
708 d.makefunccanrecover = false
712 // The implementation of the predeclared function recover.
713 func gorecover() interface{} {
714 gp := getg()
715 p := gp._panic
716 if p != nil && !p.recovered {
717 p.recovered = true
718 return p.arg
720 return nil
723 // deferredrecover is called when a call to recover is deferred. That
724 // is, something like
725 // defer recover()
727 // We need to handle this specially. In gc, the recover function
728 // looks up the stack frame. In particular, that means that a deferred
729 // recover will not recover a panic thrown in the same function that
730 // defers the recover. It will only recover a panic thrown in a
731 // function that defers the deferred call to recover.
733 // In other words:
735 // func f1() {
736 // defer recover() // does not stop panic
737 // panic(0)
738 // }
740 // func f2() {
741 // defer func() {
742 // defer recover() // stops panic(0)
743 // }()
744 // panic(0)
745 // }
747 // func f3() {
748 // defer func() {
749 // defer recover() // does not stop panic
750 // panic(0)
751 // }()
752 // panic(1)
753 // }
755 // func f4() {
756 // defer func() {
757 // defer func() {
758 // defer recover() // stops panic(0)
759 // }()
760 // panic(0)
761 // }()
762 // panic(1)
763 // }
765 // The interesting case here is f3. As can be seen from f2, the
766 // deferred recover could pick up panic(1). However, this does not
767 // happen because it is blocked by the panic(0).
769 // When a function calls recover, then when we invoke it we pass a
770 // hidden parameter indicating whether it should recover something.
771 // This parameter is set based on whether the function is being
772 // invoked directly from defer. The parameter winds up determining
773 // whether __go_recover or __go_deferred_recover is called at all.
775 // In the case of a deferred recover, the hidden parameter that
776 // controls the call is actually the one set up for the function that
777 // runs the defer recover() statement. That is the right thing in all
778 // the cases above except for f3. In f3 the function is permitted to
779 // call recover, but the deferred recover call is not. We address that
780 // here by checking for that specific case before calling recover. If
781 // this function was deferred when there is already a panic on the
782 // panic stack, then we can only recover that panic, not any other.
784 // Note that we can get away with using a special function here
785 // because you are not permitted to take the address of a predeclared
786 // function like recover.
787 func deferredrecover() interface{} {
788 gp := getg()
789 if gp._defer == nil || gp._defer.panicStack != gp._panic {
790 return nil
792 return gorecover()
795 //go:linkname sync_throw sync.throw
796 func sync_throw(s string) {
797 throw(s)
800 //go:nosplit
801 func throw(s string) {
802 print("fatal error: ", s, "\n")
803 gp := getg()
804 if gp.m.throwing == 0 {
805 gp.m.throwing = 1
807 startpanic()
808 dopanic(0)
809 *(*int)(nil) = 0 // not reached
812 // runningPanicDefers is non-zero while running deferred functions for panic.
813 // runningPanicDefers is incremented and decremented atomically.
814 // This is used to try hard to get a panic stack trace out when exiting.
815 var runningPanicDefers uint32
817 // panicking is non-zero when crashing the program for an unrecovered panic.
818 // panicking is incremented and decremented atomically.
819 var panicking uint32
821 // paniclk is held while printing the panic information and stack trace,
822 // so that two concurrent panics don't overlap their output.
823 var paniclk mutex
825 func startpanic() {
826 _g_ := getg()
827 // Uncomment when mheap_ is in Go.
828 // if mheap_.cachealloc.size == 0 { // very early
829 // print("runtime: panic before malloc heap initialized\n")
830 // _g_.m.mallocing = 1 // tell rest of panic not to try to malloc
831 // } else
832 if _g_.m.mcache == nil { // can happen if called from signal handler or throw
833 _g_.m.mcache = allocmcache()
836 switch _g_.m.dying {
837 case 0:
838 _g_.m.dying = 1
839 _g_.writebuf = nil
840 atomic.Xadd(&panicking, 1)
841 lock(&paniclk)
842 if debug.schedtrace > 0 || debug.scheddetail > 0 {
843 schedtrace(true)
845 freezetheworld()
846 return
847 case 1:
848 // Something failed while panicking, probably the print of the
849 // argument to panic(). Just print a stack trace and exit.
850 _g_.m.dying = 2
851 print("panic during panic\n")
852 dopanic(0)
853 exit(3)
854 fallthrough
855 case 2:
856 // This is a genuine bug in the runtime, we couldn't even
857 // print the stack trace successfully.
858 _g_.m.dying = 3
859 print("stack trace unavailable\n")
860 exit(4)
861 fallthrough
862 default:
863 // Can't even print! Just exit.
864 exit(5)
868 var didothers bool
869 var deadlock mutex
871 func dopanic(unused int) {
872 gp := getg()
873 if gp.sig != 0 {
874 signame := signame(gp.sig)
875 if signame != "" {
876 print("[signal ", signame)
877 } else {
878 print("[signal ", hex(gp.sig))
880 print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
883 level, all, docrash := gotraceback()
884 _g_ := getg()
885 if level > 0 {
886 if gp != gp.m.curg {
887 all = true
889 if gp != gp.m.g0 {
890 print("\n")
891 goroutineheader(gp)
892 traceback(0)
893 } else if level >= 2 || _g_.m.throwing > 0 {
894 print("\nruntime stack:\n")
895 traceback(0)
897 if !didothers && all {
898 didothers = true
899 tracebackothers(gp)
902 unlock(&paniclk)
904 if atomic.Xadd(&panicking, -1) != 0 {
905 // Some other m is panicking too.
906 // Let it print what it needs to print.
907 // Wait forever without chewing up cpu.
908 // It will exit when it's done.
909 lock(&deadlock)
910 lock(&deadlock)
913 if docrash {
914 crash()
917 exit(2)
920 //go:nosplit
921 func canpanic(gp *g) bool {
922 // Note that g is m->gsignal, different from gp.
923 // Note also that g->m can change at preemption, so m can go stale
924 // if this function ever makes a function call.
925 _g_ := getg()
926 _m_ := _g_.m
928 // Is it okay for gp to panic instead of crashing the program?
929 // Yes, as long as it is running Go code, not runtime code,
930 // and not stuck in a system call.
931 if gp == nil || gp != _m_.curg {
932 return false
934 if _m_.locks-_m_.softfloat != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
935 return false
937 status := readgstatus(gp)
938 if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
939 return false
941 return true