// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
8 "runtime/internal/atomic"
// For gccgo, while we still have C runtime code, use go:linkname to
// rename some functions to themselves, so that the compiler will
// export them.
//go:linkname gotraceback runtime.gotraceback
//go:linkname args runtime.args
//go:linkname goargs runtime.goargs
//go:linkname check runtime.check
//go:linkname goenvs_unix runtime.goenvs_unix
//go:linkname parsedebugvars runtime.parsedebugvars
//go:linkname timediv runtime.timediv
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

// traceback_cache defaults to "level 2, no all, no crash"
// (2 << tracebackShift) until parsedebugvars runs.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env holds the bits that came from the environment
// (GOTRACEBACK), so SetTraceback can re-apply them.
var traceback_env uint32
39 // gotraceback returns the current traceback settings.
41 // If level is 0, suppress all tracebacks.
42 // If level is 1, show tracebacks, but exclude runtime frames.
43 // If level is 2, show tracebacks including runtime frames.
44 // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
45 // If crash is set, crash (core dump, etc) after tracebacking.
48 func gotraceback() (level
int32, all
, crash
bool) {
50 t
:= atomic
.Load(&traceback_cache
)
51 crash
= t
&tracebackCrash
!= 0
52 all
= _g_
.m
.throwing
> 0 || t
&tracebackAll
!= 0
53 if _g_
.m
.traceback
!= 0 {
54 level
= int32(_g_
.m
.traceback
)
56 level
= int32(t
>> tracebackShift
)
66 // nosplit for use in linux startup sysargs
68 func argv_index(argv
**byte, i
int32) *byte {
69 return *(**byte)(add(unsafe
.Pointer(argv
), uintptr(i
)*sys
.PtrSize
))
72 func args(c
int32, v
**byte) {
79 if GOOS
== "windows" {
82 argslice
= make([]string, argc
)
83 for i
:= int32(0); i
< argc
; i
++ {
84 argslice
[i
] = gostringnocopy(argv_index(argv
, i
))
89 // TODO(austin): ppc64 in dynamic linking mode doesn't
90 // guarantee env[] will immediately follow argv. Might cause
93 for argv_index(argv
, argc
+1+n
) != nil {
97 envs
= make([]string, n
)
98 for i
:= int32(0); i
< n
; i
++ {
99 envs
[i
] = gostring(argv_index(argv
, argc
+1+i
))
103 func environ() []string {
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64
111 func testAtomic64() {
114 if atomic
.Cas64(&test_z64
, test_x64
, 1) {
115 throw("cas64 failed")
118 throw("cas64 failed")
121 if !atomic
.Cas64(&test_z64
, test_x64
, 1) {
122 throw("cas64 failed")
124 if test_x64
!= 42 || test_z64
!= 1 {
125 throw("cas64 failed")
127 if atomic
.Load64(&test_z64
) != 1 {
128 throw("load64 failed")
130 atomic
.Store64(&test_z64
, (1<<40)+1)
131 if atomic
.Load64(&test_z64
) != (1<<40)+1 {
132 throw("store64 failed")
134 if atomic
.Xadd64(&test_z64
, (1<<40)+1) != (2<<40)+2 {
135 throw("xadd64 failed")
137 if atomic
.Load64(&test_z64
) != (2<<40)+2 {
138 throw("xadd64 failed")
140 if atomic
.Xchg64(&test_z64
, (3<<40)+3) != (2<<40)+2 {
141 throw("xchg64 failed")
143 if atomic
.Load64(&test_z64
) != (3<<40)+3 {
144 throw("xchg64 failed")
174 if unsafe
.Sizeof(a
) != 1 {
177 if unsafe
.Sizeof(b
) != 1 {
180 if unsafe
.Sizeof(c
) != 2 {
183 if unsafe
.Sizeof(d
) != 2 {
186 if unsafe
.Sizeof(e
) != 4 {
189 if unsafe
.Sizeof(f
) != 4 {
192 if unsafe
.Sizeof(g
) != 8 {
195 if unsafe
.Sizeof(h
) != 8 {
198 if unsafe
.Sizeof(i
) != 4 {
201 if unsafe
.Sizeof(j
) != 8 {
204 if unsafe
.Sizeof(k
) != sys
.PtrSize
{
207 if unsafe
.Sizeof(l
) != sys
.PtrSize
{
210 if unsafe
.Sizeof(x1
) != 1 {
211 throw("bad unsafe.Sizeof x1")
213 if unsafe
.Offsetof(y1
.y
) != 1 {
214 throw("bad offsetof y1.y")
216 if unsafe
.Sizeof(y1
) != 2 {
217 throw("bad unsafe.Sizeof y1")
220 if timediv(12345*1000000000+54321, 1000000000, &e
) != 12345 || e
!= 54321 {
226 if !atomic
.Cas(&z
, 1, 2) {
234 if atomic
.Cas(&z
, 5, 6) {
242 if !atomic
.Cas(&z
, 0xffffffff, 0xfffffffe) {
249 k
= unsafe
.Pointer(uintptr(0xfedcb123))
250 if sys
.PtrSize
== 8 {
251 k
= unsafe
.Pointer(uintptr(k
) << 10)
253 if casp(&k
, nil, nil) {
257 if !casp(&k
, k
, k1
) {
264 m
= [4]byte{1, 1, 1, 1}
265 atomic
.Or8(&m
[1], 0xf0)
266 if m
[0] != 1 || m
[1] != 0xf1 || m
[2] != 1 || m
[3] != 1 {
270 m
= [4]byte{0xff, 0xff, 0xff, 0xff}
271 atomic
.And8(&m
[1], 0x1)
272 if m
[0] != 0xff || m
[1] != 0x1 || m
[2] != 0xff || m
[3] != 0xff {
276 *(*uint64)(unsafe
.Pointer(&j
)) = ^uint64(0)
284 *(*uint64)(unsafe
.Pointer(&j1
)) = ^uint64(1)
292 *(*uint32)(unsafe
.Pointer(&i
)) = ^uint32(0)
300 *(*uint32)(unsafe
.Pointer(&i1
)) = ^uint32(1)
310 // if _FixedStack != round2(_FixedStack) {
311 // throw("FixedStack is not power-of-2")
315 throw("assembly checks failed")
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
// NOTE(review): field list reconstructed to match the dbgvars table;
// confirm against the full file.
var debug struct {
	allocfreetrace     int32
	cgocheck           int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcrescanstacks     int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	sbrk               int32
	scavenge           int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
}
346 var dbgvars
= []dbgVar
{
347 {"allocfreetrace", &debug
.allocfreetrace
},
348 {"cgocheck", &debug
.cgocheck
},
349 {"efence", &debug
.efence
},
350 {"gccheckmark", &debug
.gccheckmark
},
351 {"gcpacertrace", &debug
.gcpacertrace
},
352 {"gcshrinkstackoff", &debug
.gcshrinkstackoff
},
353 {"gcrescanstacks", &debug
.gcrescanstacks
},
354 {"gcstoptheworld", &debug
.gcstoptheworld
},
355 {"gctrace", &debug
.gctrace
},
356 {"invalidptr", &debug
.invalidptr
},
357 {"sbrk", &debug
.sbrk
},
358 {"scavenge", &debug
.scavenge
},
359 {"scheddetail", &debug
.scheddetail
},
360 {"schedtrace", &debug
.schedtrace
},
361 {"tracebackancestors", &debug
.tracebackancestors
},
364 func parsedebugvars() {
368 // Unfortunately, because gccgo uses conservative stack scanning,
369 // we can not enable invalid pointer checking. It is possible for
370 // memory block M1 to point to M2, and for both to be dead.
371 // We release M2, causing the entire span to be released.
372 // Before we release M1, a stack pointer appears that point into it.
373 // This stack pointer is presumably dead, but causes M1 to be marked.
374 // We scan M1 and see the pointer to M2 on a released span.
375 // At that point, if debug.invalidptr is set, we crash.
376 // This is not a problem, assuming that M1 really is dead and
377 // the pointer we discovered to it will not be used.
378 // debug.invalidptr = 1
380 for p
:= gogetenv("GODEBUG"); p
!= ""; {
386 field
, p
= p
[:i
], p
[i
+1:]
388 i
= index(field
, "=")
392 key
, value
:= field
[:i
], field
[i
+1:]
394 // Update MemProfileRate directly here since it
395 // is int, not int32, and should only be updated
396 // if specified in GODEBUG.
397 if key
== "memprofilerate" {
398 if n
, ok
:= atoi(value
); ok
{
402 for _
, v
:= range dbgvars
{
404 if n
, ok
:= atoi32(value
); ok
{
412 setTraceback(gogetenv("GOTRACEBACK"))
413 traceback_env
= traceback_cache
416 //go:linkname setTraceback runtime_debug.SetTraceback
417 func setTraceback(level
string) {
423 t
= 1 << tracebackShift
425 t
= 1<<tracebackShift | tracebackAll
427 t
= 2<<tracebackShift | tracebackAll
429 t
= 2<<tracebackShift | tracebackAll | tracebackCrash
432 if n
, ok
:= atoi(level
); ok
&& n
== int(uint32(n
)) {
433 t |
= uint32(n
) << tracebackShift
436 // when C owns the process, simply exit'ing the process on fatal errors
437 // and panics is surprising. Be louder and abort instead.
438 if islibrary || isarchive
{
444 atomic
.Store(&traceback_cache
, t
)
// Poor mans 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner: quotients that do not fit
// in an int32 return 0x7fffffff with *rem set to 0.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	// Binary long division: subtract div<<bit for each quotient bit
	// from 30 down to 0 (bit 31 would overflow int32).
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		// Quotient does not fit in 31 bits: saturate.
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
472 // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
482 func releasem(mp
*m
) {
485 // if mp.locks == 0 && _g_.preempt {
486 // // restore the preemption request in case we've cleared it in newstack
487 // _g_.stackguard0 = stackPreempt
492 func gomcache() *mcache
{
493 return getg().m
.mcache