// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
//var F64to32 = f64to32
//var F32to64 = f32to64
//var Fintto64 = fintto64
//var F64toint = f64toint
var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var Fastlog2 = fastlog2
// LFNode has the same layout as lfnode, so tests can push and pop
// nodes through the wrappers below.
type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}
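// A hypothetical external test could exercise these wrappers roughly as
// follows. This is a sketch only: it ignores the real lfstack's requirement
// that nodes live outside the GC heap, and the test body is illustrative.
//
//	var stack uint64
//	node := &runtime.LFNode{}
//	runtime.LFStackPush(&stack, node)
//	if got := runtime.LFStackPop(&stack); got != node {
//		t.Fatal("pop returned a different node than was pushed")
//	}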
func GCMask(x interface{}) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}
func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}
func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		// Count how many elements ended up on p2, marking each stolen element.
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for gp, _ = runqget(p2); gp != nil; gp, _ = runqget(p2) {
			s++
			gp.sig++
		}
		// Drain whatever remained on p1.
		for gp, _ = runqget(p1); gp != nil; gp, _ = runqget(p1) {
			gp.sig++
		}
		// Every element must have been seen exactly once.
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}
func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	_p_ := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(_p_, &gs[0], next0)
		go func(done chan bool, p *p, ready *uint32, next0, next1 bool) {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}(done, _p_, ready, next0, next1)
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(_p_, &gs[1], next1)
		runqget(_p_)
		<-done
		runqget(_p_)
	}
}
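// The RunSchedLocalQueue* helpers above are designed to be driven from plain
// tests in the runtime_test package. A minimal driver might look like the
// following sketch (test names and iteration counts are illustrative, not
// necessarily the real ones):
//
//	func TestSchedLocalQueue(t *testing.T)      { runtime.RunSchedLocalQueueTest() }
//	func TestSchedLocalQueueSteal(t *testing.T) { runtime.RunSchedLocalQueueStealTest() }
//
//	func TestSchedLocalQueueEmpty(t *testing.T) {
//		iters := 100000
//		if testing.Short() {
//			iters = 1000
//		}
//		runtime.RunSchedLocalQueueEmptyTest(iters)
//	}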
var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash
func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

var HashLoad = &hashLoad
// entry point for testing
//func GostringW(w []uint16) (s string) {
//	s = gostringw(&w[0])
//	return
//}

type Uintreg sys.Uintreg

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }
//var BigEndian = sys.BigEndian
func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}
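// BenchSetType is meant to be called from a benchmark with b.N as the
// iteration count, for example (sketch; the benchmark name and element type
// are illustrative):
//
//	func BenchmarkSetTypePtr(b *testing.B) {
//		runtime.BenchSetType(b.N, new(int8))
//	}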
const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}
func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }
type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}
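// A rough usage sketch for the ProfBuf wrappers from an external test.
// All concrete values below (tag, now, the header words, and the PCs) are
// placeholders, not part of this file:
//
//	b := runtime.NewProfBuf(2, 1000, 0)
//	b.Write(&tag, now, []uint64{1, 2}, []uintptr{pc1, pc2})
//	data, tags, eof := b.Read(runtime.ProfBufNonBlocking)
//	b.Close()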
// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees. readmemstats_m flushed the cached stats, so
		// these are up-to-date.
		var smallFree uint64
		slow.Frees = mheap_.nlargefree
		for i := range mheap_.nsmallfree {
			slow.Frees += mheap_.nsmallfree[i]
			bySize[i].Frees = mheap_.nsmallfree[i]
			bySize[i].Mallocs += mheap_.nsmallfree[i]
			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
		}
		slow.Frees += memstats.tinyallocs
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}
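// The intended use is a consistency check from an external test: the
// runtime-computed stats and the heap-scan stats should agree. A minimal
// sketch (the field list is illustrative, not exhaustive):
//
//	base, slow := runtime.ReadMemStatsSlow()
//	if base.Alloc != slow.Alloc || base.Mallocs != slow.Mallocs || base.Frees != slow.Frees {
//		t.Errorf("runtime and heap-scan MemStats disagree: %+v vs %+v", base, slow)
//	}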
// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

// deadlock is never unlocked; acquiring it twice blocks forever.
var deadlock mutex

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}
type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}
func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}
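// These helpers let an external test peek at map internals, for example to
// check that an empty, unwritten map has not allocated its bucket array.
// A sketch only; the exact growth behavior being tested is up to the caller:
//
//	m := map[int]int{}
//	if !runtime.MapBucketsPointerIsNil(m) {
//		t.Error("empty map allocated buckets eagerly")
//	}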
func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}