libgo: update to Go 1.11
libgo/go/runtime/runtime1.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, while we still have C runtime code, use go:linkname to
// rename some functions to themselves, so that the compiler will
// export them.

//go:linkname gotraceback runtime.gotraceback
//go:linkname args runtime.args
//go:linkname goargs runtime.goargs
//go:linkname check runtime.check
//go:linkname goenvs_unix runtime.goenvs_unix
//go:linkname parsedebugvars runtime.parsedebugvars
//go:linkname timediv runtime.timediv

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

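// Worked example (illustrative): with the constants above,
// tracebackCrash == 1, tracebackAll == 2, and tracebackShift == 2, so
// GOTRACEBACK=crash is cached as
//
//	t = 2<<tracebackShift | tracebackAll | tracebackCrash // == 11 (0b1011)
//
// and gotraceback below recovers level = t>>tracebackShift == 2 with
// the "all" and "crash" bits set.
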
// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

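// Illustrative note: argv_index(argv, i) is the Go spelling of the C
// expression argv[i]: advance argv by i pointer-sized slots, then load
// the *byte stored there.
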
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

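// Illustrative note: the scan starts at argv[argc+1] because the Unix
// argument block is laid out as a nil-terminated argv immediately
// followed by a nil-terminated environment:
//
//	argv[0] ... argv[argc-1] nil env[0] ... env[n-1] nil
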
func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	// if _FixedStack != round2(_FixedStack) {
	// 	throw("FixedStack is not power-of-2")
	// }

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	allocfreetrace     int32
	cgocheck           int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcrescanstacks     int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	sbrk               int32
	scavenge           int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcrescanstacks", &debug.gcrescanstacks},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1

	// Unfortunately, because gccgo uses conservative stack scanning,
	// we cannot enable invalid pointer checking. It is possible for
	// memory block M1 to point to M2, and for both to be dead.
	// We release M2, causing the entire span to be released.
	// Before we release M1, a stack pointer appears that points into it.
	// This stack pointer is presumably dead, but causes M1 to be marked.
	// We scan M1 and see the pointer to M2 on a released span.
	// At that point, if debug.invalidptr is set, we crash.
	// This is not a problem, assuming that M1 really is dead and
	// the pointer we discovered to it will not be used.
	// debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}

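// Worked example (illustrative): GODEBUG=gctrace=1,schedtrace=1000 is
// split on commas, each field is split on "=", and the dbgvars table
// above maps the keys to struct fields, leaving debug.gctrace == 1 and
// debug.schedtrace == 1000.
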
//go:linkname setTraceback runtime_debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}

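// Worked example (illustrative): a numeric setting such as
// GOTRACEBACK=2 is handled by the default case: atoi("2") succeeds, so
// t = tracebackAll | 2<<tracebackShift, the same bits the named
// "system" level produces.
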
// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

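// Worked example (illustrative): check() above exercises timediv as a
// nanoseconds-to-seconds split:
//
//	timediv(12345*1000000000+54321, 1000000000, &e) // returns 12345, e == 54321
//
// Quotients that do not fit in 31 bits saturate to 0x7fffffff with the
// remainder reported as 0.
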
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	// _g_ := getg()
	mp.locks--
	// if mp.locks == 0 && _g_.preempt {
	// 	// restore the preemption request in case we've cleared it in newstack
	// 	_g_.stackguard0 = stackPreempt
	// }
}

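// Usage sketch (illustrative): acquirem and releasem are used in pairs
// to keep a goroutine bound to its current M across a critical section;
// incrementing m.locks suppresses preemption until the matching
// releasem:
//
//	mp := acquirem()
//	// ... work that must not migrate off this M ...
//	releasem(mp)
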
//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}