2017-03-02 Richard Biener <rguenther@suse.de>
[official-gcc.git] / libgo / go / runtime / runtime1.go
bloba41cfc81181722d482f206776f6b5b3b69607a5c
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime
7 import (
8 "runtime/internal/atomic"
9 "runtime/internal/sys"
10 "unsafe"
13 // For gccgo, while we still have C runtime code, use go:linkname to
14 // rename some functions to themselves, so that the compiler will
15 // export them.
17 //go:linkname gotraceback runtime.gotraceback
18 //go:linkname args runtime.args
19 //go:linkname goargs runtime.goargs
20 //go:linkname check runtime.check
21 //go:linkname goenvs_unix runtime.goenvs_unix
22 //go:linkname parsedebugvars runtime.parsedebugvars
23 //go:linkname timediv runtime.timediv
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota // bit 0: abort/core-dump after printing the traceback
	tracebackAll               // bit 1: print all goroutines, not just the current one
	tracebackShift = iota      // number of low flag bits; the level is stored above them
)

// traceback_cache is the packed traceback setting read by gotraceback.
// It starts at level 2 (include system frames) until parsedebugvars
// installs the value from GOTRACEBACK.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env holds the packed value that came from the GOTRACEBACK
// environment variable; setTraceback ORs it back into every later update.
var traceback_env uint32
39 // gotraceback returns the current traceback settings.
41 // If level is 0, suppress all tracebacks.
42 // If level is 1, show tracebacks, but exclude runtime frames.
43 // If level is 2, show tracebacks including runtime frames.
44 // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
45 // If crash is set, crash (core dump, etc) after tracebacking.
47 //go:nosplit
48 func gotraceback() (level int32, all, crash bool) {
49 _g_ := getg()
50 all = _g_.m.throwing > 0
51 if _g_.m.traceback != 0 {
52 level = int32(_g_.m.traceback)
53 return
55 t := atomic.Load(&traceback_cache)
56 crash = t&tracebackCrash != 0
57 all = all || t&tracebackAll != 0
58 level = int32(t >> tracebackShift)
59 return
// Raw command-line data as passed to the C entry point, recorded by
// args for later use by goargs and goenvs_unix.
var (
	argc int32
	argv **byte
)
67 // nosplit for use in linux startup sysargs
68 //go:nosplit
69 func argv_index(argv **byte, i int32) *byte {
70 return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
73 func args(c int32, v **byte) {
74 argc = c
75 argv = v
76 sysargs(c, v)
79 func goargs() {
80 if GOOS == "windows" {
81 return
83 argslice = make([]string, argc)
84 for i := int32(0); i < argc; i++ {
85 argslice[i] = gostringnocopy(argv_index(argv, i))
89 func goenvs_unix() {
90 // TODO(austin): ppc64 in dynamic linking mode doesn't
91 // guarantee env[] will immediately follow argv. Might cause
92 // problems.
93 n := int32(0)
94 for argv_index(argv, argc+1+n) != nil {
95 n++
98 envs = make([]string, n)
99 for i := int32(0); i < n; i++ {
100 envs[i] = gostring(argv_index(argv, argc+1+i))
// environ returns the environment captured at startup by goenvs_unix.
func environ() []string {
	return envs
}
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64
// testAtomic64 sanity-checks the 64-bit atomic operations (Cas64,
// Load64, Store64, Xadd64, Xchg64) at startup and throws if any
// returns a wrong result. The statements are order-dependent: each
// check relies on the value left behind by the previous one.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
	// prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
	// prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
	// prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
	if atomic.Cas64(&test_z64, test_x64, 1) {
		// Old value 0 does not match 42, so this CAS must fail.
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		// Now the old value matches, so the CAS must succeed.
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// Values above 2^32 verify that the full 64 bits are handled.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
// check verifies basic assumptions the runtime makes about the
// compiler and target: sizes of the primitive types, struct field
// layout, timediv, 32-bit atomics, pointer compare-and-swap, atomic
// OR on bytes, IEEE NaN comparison rules, and (via testAtomic64) the
// 64-bit atomics. It throws on any failure.
func check() {

	// This doesn't currently work for gccgo. Because escape
	// analysis is not turned on by default, the code below that
	// takes the address of local variables causes memory
	// allocation, but this function is called before the memory
	// allocator has been initialized.
	return

	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	// y1.y must sit immediately after the 1-byte x1 field.
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	// Old value 5 does not match 4, so this CAS must fail.
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		// Move the pattern into the wider address space on 64-bit.
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		// Old value nil does not match k, so this CAS must fail.
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	// Only m[1] may change; its neighbors must be untouched.
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	// All-ones bit pattern is a float64 NaN; NaN must compare unequal
	// to everything, including itself.
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	// Same NaN checks for float32.
	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if i == i {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	// if _FixedStack != round2(_FixedStack) {
	// 	throw("FixedStack is not power-of-2")
	// }

	if !checkASM() {
		throw("assembly checks failed")
	}
}
// dbgVar pairs a GODEBUG key name with the int32 variable that
// receives its parsed value.
type dbgVar struct {
	name  string
	value *int32
}
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
//
// For gccgo we use a named type so that the C code can see the
// definition.
type debugVars struct {
	allocfreetrace    int32
	cgocheck          int32
	efence            int32
	gccheckmark       int32
	gcpacertrace      int32
	gcshrinkstackoff  int32
	gcstackbarrieroff int32
	gcstackbarrierall int32
	gcrescanstacks    int32
	gcstoptheworld    int32
	gctrace           int32
	invalidptr        int32
	sbrk              int32
	scavenge          int32
	scheddetail       int32
	schedtrace        int32
	wbshadow          int32
}

// debug is the live set of GODEBUG settings, filled in by parsedebugvars.
var debug debugVars
// runtime_setdebug hands the parsed debug settings to the C runtime.
//
// For gccgo's C code.
//extern runtime_setdebug
func runtime_setdebug(*debugVars)
// dbgvars maps each recognized GODEBUG key to the debug field that
// stores its value ("memprofilerate" is handled separately).
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstackbarrieroff", &debug.gcstackbarrieroff},
	{"gcstackbarrierall", &debug.gcstackbarrierall},
	{"gcrescanstacks", &debug.gcrescanstacks},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"wbshadow", &debug.wbshadow},
}
// parsedebugvars parses the comma-separated key=value pairs in the
// GODEBUG environment variable into debug (and MemProfileRate),
// installs the GOTRACEBACK setting, and forwards the result to the
// C runtime via runtime_setdebug.
func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		// Peel off one comma-separated field per iteration.
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			// Fields without "=" are silently ignored.
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	// Remember the environment-derived value so later SetTraceback
	// calls cannot drop it (see setTraceback).
	traceback_env = traceback_cache

	if debug.gcrescanstacks == 0 {
		// Without rescanning, there's no need for stack
		// barriers.
		debug.gcstackbarrieroff = 1
		debug.gcstackbarrierall = 0
	}

	// if debug.gcstackbarrierall > 0 {
	// 	firstStackBarrierOffset = 0
	// }

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
	}

	// Tell the C code what the value is.
	runtime_setdebug(&debug)
}
// setTraceback packs a GOTRACEBACK level string ("none", "single",
// "all", "system", "crash", or a bare integer) into the flag+level
// encoding described above and stores it in traceback_cache.
// It is linked to runtime/debug.SetTraceback.
//
//go:linkname setTraceback runtime_debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		// Numeric levels imply "all"; reject values that do not
		// round-trip through uint32.
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// when C owns the process, simply exit'ing the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	// Never drop bits that came from the GOTRACEBACK environment.
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
// Poor mans 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//
// timediv computes v/div by long division over bits 30..0, storing
// the remainder through rem (if non-nil). If the quotient does not
// fit in 31 bits it saturates: rem is set to 0 and 0x7fffffff is
// returned.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	quot := int32(0)
	for shift := uint(30); ; shift-- {
		if chunk := int64(div) << shift; v >= chunk {
			v -= chunk
			quot += 1 << shift
		}
		if shift == 0 {
			break
		}
	}
	if v >= int64(div) {
		// Quotient would need bit 31 or higher: saturate.
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return quot
}
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

// acquirem increments the current M's lock count and returns that M.
// Pair every call with releasem.
//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}
// releasem undoes a prior acquirem by decrementing mp's lock count.
//go:nosplit
func releasem(mp *m) {
	// _g_ := getg()
	mp.locks--
	// if mp.locks == 0 && _g_.preempt {
	// 	// restore the preemption request in case we've cleared it in newstack
	// 	_g_.stackguard0 = stackPreempt
	// }
}
// gomcache returns the current M's mcache.
//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}