1 // Copyright 2010 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Export guts for testing.
10 "runtime/internal/atomic"
11 "runtime/internal/sys"
19 //var F64to32 = f64to32
20 //var F32to64 = f32to64
22 //var Fintto64 = fintto64
23 //var F64toint = f64toint
// Exported aliases of internal scheduler/runtime functions so tests in
// runtime_test can call them.
var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

// Fastlog2 exposes the runtime's fast log2 approximation for testing.
var Fastlog2 = fastlog2
43 func LFStackPush(head
*uint64, node
*LFNode
) {
44 (*lfstack
)(head
).push((*lfnode
)(unsafe
.Pointer(node
)))
47 func LFStackPop(head
*uint64) *LFNode
{
48 return (*LFNode
)(unsafe
.Pointer((*lfstack
)(head
).pop()))
// GCMask returns a byte slice derived from the value stored in x.
// NOTE(review): the function body is not visible in this excerpt, so the
// exact contents of ret are unverified here.
func GCMask(x interface{}) (ret []byte) {
// RunSchedLocalQueueTest exercises a P's local run queue: for each i it
// checks the queue starts empty, pushes i entries, pops them back
// verifying identity, and checks the queue is empty again.
// NOTE(review): several lines (the *p setup, closing braces, and the
// throw after the "bad element" print) are not visible in this excerpt;
// the visible statements are kept as extracted.
func RunSchedLocalQueueTest() {
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		// Queue must be empty at the start of each iteration.
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		// Push i copies of the same element.
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		// Pop them back and verify each is the element we pushed.
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
		// Queue must be empty again afterwards.
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
// RunSchedLocalQueueStealTest fills p1's run queue with i elements, then
// steals into p2 via runqsteal and checks element identity (via the .sig
// marker) and that the steal count is i/2 or i/2+1.
// NOTE(review): p1/p2 setup, the loop bodies that mark/drain elements,
// and closing braces are not visible in this excerpt; the visible
// statements are kept as extracted.
func RunSchedLocalQueueStealTest() {
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			// Fill p1's queue with i distinct elements.
			runqput(p1, &gs[j], false)
		// Steal from p1 into p2.
		gp := runqsteal(p2, p1, true)
		for j := 0; j < i; j++ {
			print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
		// The steal must take half the queue, rounding either way.
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
// RunSchedLocalQueueEmptyTest races runqput calls against emptiness
// checks for iters iterations to verify a P's run queue is never
// spuriously observed as empty.
// NOTE(review): goroutine setup, the emptiness-check branch, closing
// braces, and the final synchronization are not visible in this excerpt;
// the visible statements are kept as extracted.
func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for arbitrary long time).
	done := make(chan bool, 1)
	for i := 0; i < iters; i++ {
		// Alternate the "next" hint of each put across iterations.
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		// Spin until both sides have bumped ready (a two-party barrier).
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		println("next:", next0, next1)
		throw("queue is empty")
		// Second barrier before the second put.
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		runqput(p, &gs[1], next1)
// Exported aliases of the runtime's internal hash functions for testing.
var StringHash = stringHash
var BytesHash = bytesHash
var Int32Hash = int32Hash
var Int64Hash = int64Hash
var EfaceHash = efaceHash
var IfaceHash = ifaceHash
159 func MemclrBytes(b
[]byte) {
160 s
:= (*slice
)(unsafe
.Pointer(&b
))
161 memclrNoHeapPointers(s
.array
, uintptr(s
.len))
// HashLoad points at the internal hashLoad variable so tests can read
// and adjust it.
var HashLoad = &hashLoad
166 // entry point for testing
167 //func GostringW(w []uint16) (s string) {
168 // s = gostringw(&w[0])
// Uintreg re-exports sys.Uintreg for testing.
type Uintreg sys.Uintreg
179 func Envs() []string { return envs
}
180 func SetEnvs(e
[]string) { envs
= e
}
182 //var BigEndian = sys.BigEndian
// BenchSetType benchmarks heapBitsSetType: it derives the element type
// and total size from x, then writes the heap bitmap for it n times.
// NOTE(review): the type/size extraction preamble, the switch case
// labels, the allocation of p, and closing braces are not visible in
// this excerpt; the visible statements are kept as extracted.
func BenchSetType(n int, x interface{}) {
	switch t.kind & kindMask {
		// Pointer: describe the pointed-to element type.
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		// Slice: element type scaled by the slice length.
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
	// Round the request up to the allocator's size class.
	allocSize := roundupsize(size)
	for i := 0; i < n; i++ {
		heapBitsSetType(uintptr(p), allocSize, size, t)
// PtrSize re-exports sys.PtrSize for testing.
const PtrSize = sys.PtrSize
// ForceGCPeriod points at forcegcperiod so tests can adjust it.
var ForceGCPeriod = &forcegcperiod
// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	// NOTE(review): a preceding statement applying level is not visible
	// in this excerpt; only the cache propagation is shown.
	traceback_env = traceback_cache
// Unaligned memory-read helpers, exported for testing.
var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64
// CountPagesInUse returns the heap's pagesInUse counter together with a
// count recomputed by walking all spans, so tests can cross-check them.
// NOTE(review): the per-span accumulation into counted, the matching
// startTheWorld, and the return are not visible in this excerpt; the
// visible statements are kept as extracted.
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")
	pagesInUse = uintptr(mheap_.pagesInUse)
	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
244 func Fastrand() uint32 { return fastrand() }
245 func Fastrandn(n
uint32) uint32 { return fastrandn(n
) }
249 func NewProfBuf(hdrsize
, bufwords
, tags
int) *ProfBuf
{
250 return (*ProfBuf
)(newProfBuf(hdrsize
, bufwords
, tags
))
253 func (p
*ProfBuf
) Write(tag
*unsafe
.Pointer
, now
int64, hdr
[]uint64, stk
[]uintptr) {
254 (*profBuf
)(p
).write(tag
, now
, hdr
, stk
)
// Profiling-buffer read modes, exported for testing.
// NOTE(review): the enclosing declaration header is not visible in this
// excerpt.
ProfBufBlocking = profBufBlocking
ProfBufNonBlocking = profBufNonBlocking
262 func (p
*ProfBuf
) Read(mode profBufReadMode
) ([]uint64, []unsafe
.Pointer
, bool) {
263 return (*profBuf
)(p
).read(profBufReadMode(mode
))
266 func (p
*ProfBuf
) Close() {
267 (*profBuf
)(p
).close()
// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
// NOTE(review): many lines (system-stack wrapper, the slow
// initialization, per-span page accounting, startTheWorld, and closing
// braces) are not visible in this excerpt; the visible statements are
// kept as extracted.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")
	// Run on the system stack to avoid stack growth allocation.
	// Make sure stats don't change.
	readmemstats_m(&base)
	// Initialize slow from base and zero the fields we're
	// Per-size-class malloc/free tallies recomputed from the heap.
	var bySize [_NumSizeClasses]struct {
		Mallocs, Frees uint64
	// Add up current allocations in spans.
	for _, s := range mheap_.allspans {
		if s.state != mSpanInUse {
		// sizeclass 0 is a large (one-object) span.
		if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
			slow.Alloc += uint64(s.elemsize)
			slow.Mallocs += uint64(s.allocCount)
			slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
			bySize[sizeclass].Mallocs += uint64(s.allocCount)
	// Add in frees. readmemstats_m flushed the cached stats, so
	// these are up-to-date.
	slow.Frees = mheap_.nlargefree
	for i := range mheap_.nsmallfree {
		slow.Frees += mheap_.nsmallfree[i]
		bySize[i].Frees = mheap_.nsmallfree[i]
		bySize[i].Mallocs += mheap_.nsmallfree[i]
		smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
	// Tiny allocations are freed (and hence allocated) in bulk.
	slow.Frees += memstats.tinyallocs
	slow.Mallocs += slow.Frees
	slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree
	// Copy the recomputed per-class tallies into the result.
	for i := range slow.BySize {
		slow.BySize[i].Mallocs = bySize[i].Mallocs
		slow.BySize[i].Frees = bySize[i].Frees
335 // BlockOnSystemStack switches to the system stack, prints "x\n" to
336 // stderr, and blocks in a stack containing
337 // "runtime.blockOnSystemStackInternal".
338 func BlockOnSystemStack() {
339 systemstack(blockOnSystemStackInternal
)
// blockOnSystemStackInternal is the function BlockOnSystemStack runs on
// the system stack. NOTE(review): its body is not visible in this
// excerpt.
func blockOnSystemStackInternal() {
// RWMutex exports the runtime's internal read-write mutex for testing.
// NOTE(review): the struct fields are not visible in this excerpt.
type RWMutex struct {
// RLock presumably delegates to the wrapped internal mutex's read-lock;
// the body is not visible in this excerpt.
func (rw *RWMutex) RLock() {
// RUnlock presumably releases a read lock; body not visible here.
func (rw *RWMutex) RUnlock() {
// Lock presumably acquires the write lock; body not visible here.
func (rw *RWMutex) Lock() {
// Unlock presumably releases the write lock; body not visible here.
func (rw *RWMutex) Unlock() {