libgo/go/runtime/export_test.go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

//var Fadd64 = fadd64
//var Fsub64 = fsub64
//var Fmul64 = fmul64
//var Fdiv64 = fdiv64
//var F64to32 = f64to32
//var F32to64 = f32to64
//var Fcmp64 = fcmp64
//var Fintto64 = fintto64
//var F64toint = f64toint
//var Sqrt = sqrt

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}
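
// The two wrappers above are the test-side surface of the runtime's
// lock-free stack. A minimal sketch of how a test in package
// runtime_test might exercise them (assuming a *testing.T named t;
// illustrative only, not part of this file):
//
//	var head uint64
//	node := &runtime.LFNode{Pushcnt: 1}
//	runtime.LFStackPush(&head, node)
//	if runtime.LFStackPop(&head) != node {
//		t.Fatal("popped a different node than was pushed")
//	}
//	if runtime.LFStackPop(&head) != nil {
//		t.Fatal("stack should be empty after the pop")
//	}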

func GCMask(x interface{}) (ret []byte) {
	return nil
}

func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for arbitrary long time).
	done := make(chan bool, 1)
	_p_ := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(_p_, &gs[0], next0)
		go func(done chan bool, p *p, ready *uint32, next0, next1 bool) {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}(done, _p_, ready, next0, next1)
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(_p_, &gs[1], next1)
		runqget(_p_)
		<-done
		runqget(_p_)
	}
}
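
// The three run-queue entry points above are meant to be driven from
// tests in package runtime_test. A minimal sketch of such callers
// (assuming the standard testing package; the iteration count is an
// arbitrary choice, not taken from this file):
//
//	func TestSchedLocalQueue(t *testing.T)      { runtime.RunSchedLocalQueueTest() }
//	func TestSchedLocalQueueSteal(t *testing.T) { runtime.RunSchedLocalQueueStealTest() }
//
//	func TestSchedLocalQueueEmpty(t *testing.T) {
//		if runtime.NumCPU() == 1 {
//			t.Skip("race requires more than one CPU")
//		}
//		runtime.RunSchedLocalQueueEmptyTest(1000)
//	}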

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

var HashLoad = &hashLoad

// entry point for testing
//func GostringW(w []uint16) (s string) {
//	s = gostringw(&w[0])
//	return
//}

type Uintreg sys.Uintreg

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

//var BigEndian = sys.BigEndian

// For benchmarking.

func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}
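
// BenchSetType repeatedly re-records the pointer/scalar layout of x in
// the heap bitmap (heapBitsSetType), so a benchmark can isolate that
// cost. A sketch of benchmarks built on it from package runtime_test
// (the node type here is an arbitrary example, not part of this file):
//
//	type node struct {
//		left, right *node
//		payload     [16]byte
//	}
//
//	func BenchmarkSetTypeNode(b *testing.B) {
//		runtime.BenchSetType(b.N, new(node))
//	}
//
//	func BenchmarkSetTypeNodeSlice(b *testing.B) {
//		runtime.BenchSetType(b.N, make([]node, 32))
//	}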

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}
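
// One way a test harness can use SetTracebackEnv is from TestMain, so
// that verbose crash output survives any later debug.SetTraceback
// calls; a sketch (assuming package runtime_test):
//
//	func TestMain(m *testing.M) {
//		runtime.SetTracebackEnv("system")
//		os.Exit(m.Run())
//	}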

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}

func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}
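
// The ProfBuf wrappers expose the profiling ring buffer for tests. A
// minimal write/read round trip from package runtime_test (buffer
// sizes and header values below are arbitrary; each record carries
// hdrsize header words plus a call stack):
//
//	b := runtime.NewProfBuf(2, 1000, 100)
//	var tag unsafe.Pointer
//	stk := make([]uintptr, 8)
//	n := runtime.Callers(1, stk)
//	b.Write(&tag, time.Now().UnixNano(), []uint64{1, 2}, stk[:n])
//	data, tags, eof := b.Read(runtime.ProfBufNonBlocking)
//	_, _, _ = data, tags, eof
//	b.Close()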

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees. readmemstats_m flushed the cached stats, so
		// these are up-to-date.
		var smallFree uint64
		slow.Frees = mheap_.nlargefree
		for i := range mheap_.nsmallfree {
			slow.Frees += mheap_.nsmallfree[i]
			bySize[i].Frees = mheap_.nsmallfree[i]
			bySize[i].Mallocs += mheap_.nsmallfree[i]
			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
		}
		slow.Frees += memstats.tinyallocs
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}
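
// A consistency test can compare the two views field by field; a
// minimal sketch from package runtime_test (checking only a couple of
// fields; a fuller comparison can range over all of MemStats):
//
//	base, slow := runtime.ReadMemStatsSlow()
//	if base.Alloc != slow.Alloc {
//		t.Errorf("Alloc: runtime-computed %d, heap scan %d", base.Alloc, slow.Alloc)
//	}
//	if base.Mallocs != slow.Mallocs {
//		t.Errorf("Mallocs: runtime-computed %d, heap scan %d", base.Mallocs, slow.Mallocs)
//	}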

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}
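
// These two helpers let a test watch a map's bucket array as it grows.
// A sketch from package runtime_test (whether an empty map has nil
// buckets depends on the size hint and escape analysis, so the sketch
// only logs it; the growth threshold below is a loose illustrative
// bound, not taken from this file):
//
//	m := map[int]int{}
//	t.Log("empty map, buckets nil:", runtime.MapBucketsPointerIsNil(m))
//	for i := 0; i < 1000; i++ {
//		m[i] = i
//	}
//	if n := runtime.MapBucketsCount(m); n < 16 {
//		t.Errorf("after 1000 inserts got %d buckets, expected the map to have grown", n)
//	}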

func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}
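
// The external count tracks runtime.LockOSThread nesting; a sketch of
// a check from package runtime_test, run on a goroutine that is not
// already locked to its thread:
//
//	runtime.LockOSThread()
//	if ext, _ := runtime.LockOSCounts(); ext != 1 {
//		t.Errorf("external lock count = %d, want 1", ext)
//	}
//	runtime.UnlockOSThread()
//	if ext, _ := runtime.LockOSCounts(); ext != 0 {
//		t.Errorf("external lock count after unlock = %d, want 0", ext)
//	}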