runtime: scan register backing store on ia64
libgo/go/runtime/runtime1.go (official-gcc.git)
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, while we still have C runtime code, use go:linkname to
// rename some functions to themselves, so that the compiler will
// export them.

//go:linkname gotraceback runtime.gotraceback
//go:linkname args runtime.args
//go:linkname goargs runtime.goargs
//go:linkname check runtime.check
//go:linkname goenvs_unix runtime.goenvs_unix
//go:linkname parsedebugvars runtime.parsedebugvars
//go:linkname timediv runtime.timediv

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32
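// Editor's note: an illustrative sketch (not part of the original file) of how
// the packed word described above decodes, using the constants defined here;
// the encoded value corresponds to the "crash" case handled by setTraceback below.
//
//	t := uint32(2)<<tracebackShift | tracebackAll | tracebackCrash
//	level := int32(t >> tracebackShift) // 2: include runtime/system frames
//	all := t&tracebackAll != 0          // true: dump every goroutine
//	crash := t&tracebackCrash != 0      // true: abort (core dump) afterwards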
// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.

//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
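// Editor's note: an illustrative sketch (not part of the original file) of the
// process-startup layout goenvs_unix assumes: the environment array begins one
// NULL slot past the end of argv, so slot argc+1 holds the first env string.
//
//	argv[0] ... argv[argc-1]  NULL  env[0] ... env[n-1]  NULL
//	                                ^ argv_index(argv, argc+1+0)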
func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if i == i {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	// if _FixedStack != round2(_FixedStack) {
	// 	throw("FixedStack is not power-of-2")
	// }

	if !checkASM() {
		throw("assembly checks failed")
	}
}
type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	allocfreetrace   int32
	cgocheck         int32
	efence           int32
	gccheckmark      int32
	gcpacertrace     int32
	gcshrinkstackoff int32
	gcrescanstacks   int32
	gcstoptheworld   int32
	gctrace          int32
	invalidptr       int32
	sbrk             int32
	scavenge         int32
	scheddetail      int32
	schedtrace       int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcrescanstacks", &debug.gcrescanstacks},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
}
func parsedebugvars() {
	// defaults
	debug.cgocheck = 1

	// Unfortunately, because gccgo uses conservative stack scanning,
	// we can not enable invalid pointer checking. It is possible for
	// memory block M1 to point to M2, and for both to be dead.
	// We release M2, causing the entire span to be released.
	// Before we release M1, a stack pointer appears that points into it.
	// This stack pointer is presumably dead, but causes M1 to be marked.
	// We scan M1 and see the pointer to M2 on a released span.
	// At that point, if debug.invalidptr is set, we crash.
	// This is not a problem, assuming that M1 really is dead and
	// the pointer we discovered to it will not be used.
	// debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
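// Editor's note: an illustrative sketch (not part of the original file) of the
// GODEBUG syntax the loop above parses: comma-separated key=value pairs, where
// every key except "memprofilerate" is looked up in dbgvars.
//
//	GODEBUG=gctrace=1,schedtrace=1000  // sets debug.gctrace = 1 and debug.schedtrace = 1000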
//go:linkname setTraceback runtime_debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
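// Editor's note: an illustrative sketch (not part of the original file) of how a
// few GOTRACEBACK settings map onto the encoded cache word via the cases above.
//
//	setTraceback("")      // t = 1<<tracebackShift: level 1, current goroutine only
//	setTraceback("all")   // t = 1<<tracebackShift | tracebackAll: level 1, all goroutines
//	setTraceback("crash") // t = 2<<tracebackShift | tracebackAll | tracebackCrash
//	setTraceback("3")     // numeric levels hit the default case: tracebackAll | 3<<tracebackShift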
// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
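// Editor's note: an illustrative sketch (not part of the original file) of the
// shift-and-subtract loop above, using the same values check() uses to verify it.
//
//	var rem int32
//	q := timediv(12345*1000000000+54321, 1000000000, &rem)
//	// q == 12345, rem == 54321: each iteration subtracts div<<bit when it fits
//	// and sets that bit in the quotient; quotients that do not fit in 31 bits
//	// saturate to 0x7fffffff.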
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	// _g_ := getg()
	mp.locks--
	// if mp.locks == 0 && _g_.preempt {
	// 	// restore the preemption request in case we've cleared it in newstack
	// 	_g_.stackguard0 = stackPreempt
	// }
}
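// Editor's note: an illustrative sketch (not part of the original file) of the
// usual pairing of the helpers above: bump m.locks to keep the goroutine bound
// to its current M for the duration of a critical region, then release it.
//
//	mp := acquirem()
//	// ... work that must stay on this M ...
//	releasem(mp)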
//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}