1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
10 "runtime/internal/atomic"
14 // For gccgo, while we still have C runtime code, use go:linkname to
15 // export some functions to themselves.
17 //go:linkname gotraceback
21 //go:linkname goenvs_unix
22 //go:linkname parsedebugvars
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

// Default traceback level is 2 ("system"), no "all", no "crash";
// parsedebugvars/setTraceback overwrite this from GOTRACEBACK.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env is the traceback value parsed from the environment,
// preserved so SetTraceback can re-apply it.
var traceback_env uint32
39 // gotraceback returns the current traceback settings.
41 // If level is 0, suppress all tracebacks.
42 // If level is 1, show tracebacks, but exclude runtime frames.
43 // If level is 2, show tracebacks including runtime frames.
44 // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
45 // If crash is set, crash (core dump, etc) after tracebacking.
48 func gotraceback() (level
int32, all
, crash
bool) {
50 t
:= atomic
.Load(&traceback_cache
)
51 crash
= t
&tracebackCrash
!= 0
52 all
= _g_
.m
.throwing
> 0 || t
&tracebackAll
!= 0
53 if _g_
.m
.traceback
!= 0 {
54 level
= int32(_g_
.m
.traceback
)
56 level
= int32(t
>> tracebackShift
)
66 // nosplit for use in linux startup sysargs
68 func argv_index(argv
**byte, i
int32) *byte {
69 return *(**byte)(add(unsafe
.Pointer(argv
), uintptr(i
)*goarch
.PtrSize
))
72 func args(c
int32, v
**byte) {
79 if GOOS
== "windows" {
82 argslice
= make([]string, argc
)
83 for i
:= int32(0); i
< argc
; i
++ {
84 argslice
[i
] = gostringnocopy(argv_index(argv
, i
))
89 // TODO(austin): ppc64 in dynamic linking mode doesn't
90 // guarantee env[] will immediately follow argv. Might cause
93 for argv_index(argv
, argc
+1+n
) != nil {
97 envs
= make([]string, n
)
98 for i
:= int32(0); i
< n
; i
++ {
99 envs
[i
] = gostring(argv_index(argv
, argc
+1+i
))
103 func environ() []string {
// Scratch operands for testAtomic64.
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64
111 func testAtomic64() {
114 if atomic
.Cas64(&test_z64
, test_x64
, 1) {
115 throw("cas64 failed")
118 throw("cas64 failed")
121 if !atomic
.Cas64(&test_z64
, test_x64
, 1) {
122 throw("cas64 failed")
124 if test_x64
!= 42 || test_z64
!= 1 {
125 throw("cas64 failed")
127 if atomic
.Load64(&test_z64
) != 1 {
128 throw("load64 failed")
130 atomic
.Store64(&test_z64
, (1<<40)+1)
131 if atomic
.Load64(&test_z64
) != (1<<40)+1 {
132 throw("store64 failed")
134 if atomic
.Xadd64(&test_z64
, (1<<40)+1) != (2<<40)+2 {
135 throw("xadd64 failed")
137 if atomic
.Load64(&test_z64
) != (2<<40)+2 {
138 throw("xadd64 failed")
140 if atomic
.Xchg64(&test_z64
, (3<<40)+3) != (2<<40)+2 {
141 throw("xchg64 failed")
143 if atomic
.Load64(&test_z64
) != (3<<40)+3 {
144 throw("xchg64 failed")
174 if unsafe
.Sizeof(a
) != 1 {
177 if unsafe
.Sizeof(b
) != 1 {
180 if unsafe
.Sizeof(c
) != 2 {
183 if unsafe
.Sizeof(d
) != 2 {
186 if unsafe
.Sizeof(e
) != 4 {
189 if unsafe
.Sizeof(f
) != 4 {
192 if unsafe
.Sizeof(g
) != 8 {
195 if unsafe
.Sizeof(h
) != 8 {
198 if unsafe
.Sizeof(i
) != 4 {
201 if unsafe
.Sizeof(j
) != 8 {
204 if unsafe
.Sizeof(k
) != goarch
.PtrSize
{
207 if unsafe
.Sizeof(l
) != goarch
.PtrSize
{
210 if unsafe
.Sizeof(x1
) != 1 {
211 throw("bad unsafe.Sizeof x1")
213 if unsafe
.Offsetof(y1
.y
) != 1 {
214 throw("bad offsetof y1.y")
216 if unsafe
.Sizeof(y1
) != 2 {
217 throw("bad unsafe.Sizeof y1")
220 if timediv(12345*1000000000+54321, 1000000000, &e
) != 12345 || e
!= 54321 {
226 if !atomic
.Cas(&z
, 1, 2) {
234 if atomic
.Cas(&z
, 5, 6) {
242 if !atomic
.Cas(&z
, 0xffffffff, 0xfffffffe) {
249 m
= [4]byte{1, 1, 1, 1}
250 atomic
.Or8(&m
[1], 0xf0)
251 if m
[0] != 1 || m
[1] != 0xf1 || m
[2] != 1 || m
[3] != 1 {
255 m
= [4]byte{0xff, 0xff, 0xff, 0xff}
256 atomic
.And8(&m
[1], 0x1)
257 if m
[0] != 0xff || m
[1] != 0x1 || m
[2] != 0xff || m
[3] != 0xff {
261 *(*uint64)(unsafe
.Pointer(&j
)) = ^uint64(0)
269 *(*uint64)(unsafe
.Pointer(&j1
)) = ^uint64(1)
277 *(*uint32)(unsafe
.Pointer(&i
)) = ^uint32(0)
285 *(*uint32)(unsafe
.Pointer(&i1
)) = ^uint32(1)
295 // if _FixedStack != round2(_FixedStack) {
296 // throw("FixedStack is not power-of-2")
300 throw("assembly checks failed")
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32 // for Linux; issue 28466
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32
	harddecommit       int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32
}
340 var dbgvars
= []dbgVar
{
341 {"allocfreetrace", &debug
.allocfreetrace
},
342 {"clobberfree", &debug
.clobberfree
},
343 {"cgocheck", &debug
.cgocheck
},
344 {"efence", &debug
.efence
},
345 {"gccheckmark", &debug
.gccheckmark
},
346 {"gcpacertrace", &debug
.gcpacertrace
},
347 {"gcshrinkstackoff", &debug
.gcshrinkstackoff
},
348 {"gcstoptheworld", &debug
.gcstoptheworld
},
349 {"gctrace", &debug
.gctrace
},
350 {"invalidptr", &debug
.invalidptr
},
351 {"madvdontneed", &debug
.madvdontneed
},
352 {"sbrk", &debug
.sbrk
},
353 {"scavtrace", &debug
.scavtrace
},
354 {"scheddetail", &debug
.scheddetail
},
355 {"schedtrace", &debug
.schedtrace
},
356 {"tracebackancestors", &debug
.tracebackancestors
},
357 {"asyncpreemptoff", &debug
.asyncpreemptoff
},
358 {"inittrace", &debug
.inittrace
},
359 {"harddecommit", &debug
.harddecommit
},
362 func parsedebugvars() {
366 // Gccgo uses conservative stack scanning, so we cannot check
367 // invalid pointers on stack. But we can still enable invalid
368 // pointer check on heap scanning. When scanning the heap, we
369 // ensure that we only trace allocated heap objects, which should
370 // not contain invalid pointers.
373 // On Linux, MADV_FREE is faster than MADV_DONTNEED,
374 // but doesn't affect many of the statistics that
375 // MADV_DONTNEED does until the memory is actually
376 // reclaimed. This generally leads to poor user
377 // experience, like confusing stats in top and other
378 // monitoring tools; and bad integration with
379 // management systems that respond to memory usage.
380 // Hence, default to MADV_DONTNEED.
381 debug
.madvdontneed
= 1
384 for p
:= gogetenv("GODEBUG"); p
!= ""; {
386 i
:= bytealg
.IndexByteString(p
, ',')
390 field
, p
= p
[:i
], p
[i
+1:]
392 i
= bytealg
.IndexByteString(field
, '=')
396 key
, value
:= field
[:i
], field
[i
+1:]
398 // Update MemProfileRate directly here since it
399 // is int, not int32, and should only be updated
400 // if specified in GODEBUG.
401 if key
== "memprofilerate" {
402 if n
, ok
:= atoi(value
); ok
{
406 for _
, v
:= range dbgvars
{
408 if n
, ok
:= atoi32(value
); ok
{
416 debug
.malloc
= (debug
.allocfreetrace | debug
.inittrace | debug
.sbrk
) != 0
418 setTraceback(gogetenv("GOTRACEBACK"))
419 traceback_env
= traceback_cache
422 //go:linkname setTraceback runtime_1debug.SetTraceback
423 func setTraceback(level
string) {
429 t
= 1 << tracebackShift
431 t
= 1<<tracebackShift | tracebackAll
433 t
= 2<<tracebackShift | tracebackAll
435 t
= 2<<tracebackShift | tracebackAll | tracebackCrash
438 if n
, ok
:= atoi(level
); ok
&& n
== int(uint32(n
)) {
439 t |
= uint32(n
) << tracebackShift
442 // when C owns the process, simply exit'ing the process on fatal errors
443 // and panics is surprising. Be louder and abort instead.
444 if islibrary || isarchive
{
450 atomic
.Store(&traceback_cache
, t
)
// Poor mans 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	// Quotient doesn't fit in 31 bits: saturate.
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
481 // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
491 func releasem(mp
*m
) {
494 // if mp.locks == 0 && _g_.preempt {
495 // // restore the preemption request in case we've cleared it in newstack
496 // _g_.stackguard0 = stackPreempt