libgo/go/runtime/export_test.go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

//var Fadd64 = fadd64
//var Fsub64 = fsub64
//var Fmul64 = fmul64
//var Fdiv64 = fdiv64
//var F64to32 = f64to32
//var F32to64 = f32to64
//var Fcmp64 = fcmp64
//var Fintto64 = fintto64
//var F64toint = f64toint
//var Sqrt = sqrt

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

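// LFNode mirrors the runtime's internal lfnode so tests can drive the
// lock-free stack (lfstack) through LFStackPush and LFStackPop below.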
type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}

func GCMask(x interface{}) (ret []byte) {
	return nil
}

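// RunSchedLocalQueueTest exercises a P's local run queue directly: it
// pushes goroutines with runqput and pops them with runqget, checking
// that the expected elements come back and that the queue drains fully.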
func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

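// Hash function exports used by the runtime hash tests.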
var StringHash = stringHash
var BytesHash = bytesHash
var Int32Hash = int32Hash
var Int64Hash = int64Hash
var EfaceHash = efaceHash
var IfaceHash = ifaceHash

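// MemclrBytes zeroes b in place using the runtime's memory-clearing
// routine for pointer-free memory.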
func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

var HashLoad = &hashLoad

// entry point for testing
//func GostringW(w []uint16) (s string) {
//	s = gostringw(&w[0])
//	return
//}

type Uintreg sys.Uintreg

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

//var BigEndian = sys.BigEndian

// For benchmarking.

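// BenchSetType repeatedly writes heap type (pointer bitmap) information
// for x's referent, which must be a pointer or a slice, mimicking the
// work mallocgc does when it allocates such an object.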
func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

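// CountPagesInUse returns the heap's pagesInUse counter alongside an
// independent recount obtained by walking all in-use spans, so tests can
// check that the two agree. It stops the world while counting.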
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}

func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

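// ProfBuf exposes the runtime's internal profiling ring buffer (profBuf)
// so its blocking and non-blocking read paths can be tested directly.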
type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}

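// A rough usage sketch for tests; the variables below (tag, now, hdr, stk)
// are illustrative placeholders, not part of this API:
//
//	b := NewProfBuf(2, 1000, 1000)                // 2 header words per record
//	b.Write(&tag, now, hdr, stk)                  // append one record
//	data, tags, eof := b.Read(ProfBufNonBlocking) // drain without blocking
//	b.Close()
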
// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees. readmemstats_m flushed the cached stats, so
		// these are up-to-date.
		var smallFree uint64
		slow.Frees = mheap_.nlargefree
		for i := range mheap_.nsmallfree {
			slow.Frees += mheap_.nsmallfree[i]
			bySize[i].Frees = mheap_.nsmallfree[i]
			bySize[i].Mallocs += mheap_.nsmallfree[i]
			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
		}
		slow.Frees += memstats.tinyallocs
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}