// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"internal/goarch"
	"runtime/internal/atomic"
	"unsafe"
)

// For gccgo, while we still have C runtime code, use go:linkname to
// export some functions to themselves.

//go:linkname gotraceback
//go:linkname args
//go:linkname goargs
//go:linkname check
//go:linkname goenvs_unix
//go:linkname parsedebugvars
//go:linkname timediv

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32
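
// For reference, the encoding as produced by setTraceback below:
// GOTRACEBACK=all is cached as 1<<tracebackShift|tracebackAll and
// GOTRACEBACK=crash as 2<<tracebackShift|tracebackAll|tracebackCrash.
// The initial value above encodes traceback level 2 with neither the
// "all" nor the "crash" bit set.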

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
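// argv_index returns argv[i]; since argv is a C-style **byte rather than
// a Go slice, the element is reached with explicit pointer arithmetic.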
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
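	// On Unix the environment strings normally sit right after the
	// argument vector: argv[0..argc-1], a nil pointer, then env[0..n-1]
	// and a final nil, which is why indexing starts at argc+1 and stops
	// at the first nil entry.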
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != goarch.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != goarch.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}
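
	// The NaN checks below rely on the IEEE 754 rule that a NaN compares
	// unequal to every value, including itself.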
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if i == i {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	// if _FixedStack != round2(_FixedStack) {
	// 	throw("FixedStack is not power-of-2")
	// }

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32 // for Linux; issue 28466
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32
	harddecommit       int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"clobberfree", &debug.clobberfree},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"madvdontneed", &debug.madvdontneed},
	{"sbrk", &debug.sbrk},
	{"scavtrace", &debug.scavtrace},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
	{"asyncpreemptoff", &debug.asyncpreemptoff},
	{"inittrace", &debug.inittrace},
	{"harddecommit", &debug.harddecommit},
}
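
// parsedebugvars parses GODEBUG as a comma-separated list of name=value
// pairs; for example, GODEBUG=gctrace=1,schedtrace=1000 sets debug.gctrace
// to 1 and debug.schedtrace to 1000.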
func parsedebugvars() {
	// defaults
	debug.cgocheck = 1

	// Gccgo uses conservative stack scanning, so we cannot check
	// invalid pointers on stack. But we can still enable invalid
	// pointer check on heap scanning. When scanning the heap, we
	// ensure that we only trace allocated heap objects, which should
	// not contain invalid pointers.
	debug.invalidptr = 1
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
		// but doesn't affect many of the statistics that
		// MADV_DONTNEED does until the memory is actually
		// reclaimed. This generally leads to poor user
		// experience, like confusing stats in top and other
		// monitoring tools; and bad integration with
		// management systems that respond to memory usage.
		// Hence, default to MADV_DONTNEED.
		debug.madvdontneed = 1
	}

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := bytealg.IndexByteString(p, ',')
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}

//go:linkname setTraceback runtime_1debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// when C owns the process, simply exit'ing the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
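
// Note that a purely numeric GOTRACEBACK value is handled by the default
// case above: GOTRACEBACK=2, for instance, produces
// 2<<tracebackShift|tracebackAll, the same setting as GOTRACEBACK=system.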

// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
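// For example, timediv(12345*1000000000+54321, 1000000000, &rem) returns
// 12345 with rem set to 54321 (check above exercises exactly this case);
// if the quotient does not fit in 31 bits, the result saturates to
// 0x7fffffff and rem (if non-nil) is set to 0.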
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}
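
// acquirem and releasem are used in matched pairs: mp := acquirem()
// increments m.locks on the current m and returns it, and releasem(mp)
// decrements that count again.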
//go:nosplit
func releasem(mp *m) {
	// _g_ := getg()
	mp.locks--
	// if mp.locks == 0 && _g_.preempt {
	// 	// restore the preemption request in case we've cleared it in newstack
	// 	_g_.stackguard0 = stackPreempt
	// }
}