// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
17 func TestMemStats(t
*testing
.T
) {
18 t
.Skip("skipping test with gccgo")
20 // Make sure there's at least one forced GC.
23 // Test that MemStats has sane values.
27 nz
:= func(x
interface{}) error
{
28 if x
!= reflect
.Zero(reflect
.TypeOf(x
)).Interface() {
31 return fmt
.Errorf("zero value")
33 le
:= func(thresh
float64) func(interface{}) error
{
34 return func(x
interface{}) error
{
35 if reflect
.ValueOf(x
).Convert(reflect
.TypeOf(thresh
)).Float() < thresh
{
38 return fmt
.Errorf("insanely high value (overflow?); want <= %v", thresh
)
41 eq
:= func(x
interface{}) func(interface{}) error
{
42 return func(y
interface{}) error
{
46 return fmt
.Errorf("want %v", x
)
49 // Of the uint fields, HeapReleased, HeapIdle can be 0.
50 // PauseTotalNs can be 0 if timer resolution is poor.
52 // TODO: Test that GCCPUFraction is <= 0.99. This currently
53 // fails on windows/386. (Issue #19319)
54 fields
:= map[string][]func(interface{}) error
{
55 "Alloc": {nz
, le(1e10
)}, "TotalAlloc": {nz
, le(1e11
)}, "Sys": {nz
, le(1e10
)},
56 "Lookups": {nz
, le(1e10
)}, "Mallocs": {nz
, le(1e10
)}, "Frees": {nz
, le(1e10
)},
57 "HeapAlloc": {nz
, le(1e10
)}, "HeapSys": {nz
, le(1e10
)}, "HeapIdle": {le(1e10
)},
58 "HeapInuse": {nz
, le(1e10
)}, "HeapReleased": {le(1e10
)}, "HeapObjects": {nz
, le(1e10
)},
59 "StackInuse": {nz
, le(1e10
)}, "StackSys": {nz
, le(1e10
)},
60 "MSpanInuse": {nz
, le(1e10
)}, "MSpanSys": {nz
, le(1e10
)},
61 "MCacheInuse": {nz
, le(1e10
)}, "MCacheSys": {nz
, le(1e10
)},
62 "BuckHashSys": {nz
, le(1e10
)}, "GCSys": {nz
, le(1e10
)}, "OtherSys": {nz
, le(1e10
)},
63 "NextGC": {nz
, le(1e10
)}, "LastGC": {nz
},
64 "PauseTotalNs": {le(1e11
)}, "PauseNs": nil, "PauseEnd": nil,
65 "NumGC": {nz
, le(1e9
)}, "NumForcedGC": {nz
, le(1e9
)},
66 "GCCPUFraction": nil, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
70 rst
:= reflect
.ValueOf(st
).Elem()
71 for i
:= 0; i
< rst
.Type().NumField(); i
++ {
72 name
, val
:= rst
.Type().Field(i
).Name
, rst
.Field(i
).Interface()
73 checks
, ok
:= fields
[name
]
75 t
.Errorf("unknown MemStats field %s", name
)
78 for _
, check
:= range checks
{
79 if err
:= check(val
); err
!= nil {
80 t
.Errorf("%s = %v: %s", name
, val
, err
)
84 if st
.Sys
!= st
.HeapSys
+st
.StackSys
+st
.MSpanSys
+st
.MCacheSys
+
85 st
.BuckHashSys
+st
.GCSys
+st
.OtherSys
{
86 t
.Fatalf("Bad sys value: %+v", *st
)
89 if st
.HeapIdle
+st
.HeapInuse
!= st
.HeapSys
{
90 t
.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st
.HeapIdle
, st
.HeapInuse
, st
.HeapSys
)
93 if lpe
:= st
.PauseEnd
[int(st
.NumGC
+255)%len
(st
.PauseEnd
)]; st
.LastGC
!= lpe
{
94 t
.Fatalf("LastGC(%d) != last PauseEnd(%d)", st
.LastGC
, lpe
)
98 for _
, pause
:= range st
.PauseNs
{
101 if int(st
.NumGC
) < len(st
.PauseNs
) {
102 // We have all pauses, so this should be exact.
103 if st
.PauseTotalNs
!= pauseTotal
{
104 t
.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st
.PauseTotalNs
, pauseTotal
)
106 for i
:= int(st
.NumGC
); i
< len(st
.PauseNs
); i
++ {
107 if st
.PauseNs
[i
] != 0 {
108 t
.Fatalf("Non-zero PauseNs[%d]: %+v", i
, st
)
110 if st
.PauseEnd
[i
] != 0 {
111 t
.Fatalf("Non-zero PauseEnd[%d]: %+v", i
, st
)
115 if st
.PauseTotalNs
< pauseTotal
{
116 t
.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st
.PauseTotalNs
, pauseTotal
)
120 if st
.NumForcedGC
> st
.NumGC
{
121 t
.Fatalf("NumForcedGC(%d) > NumGC(%d)", st
.NumForcedGC
, st
.NumGC
)
125 func TestStringConcatenationAllocs(t
*testing
.T
) {
126 t
.Skip("skipping test with gccgo")
127 n
:= testing
.AllocsPerRun(1e3
, func() {
128 b
:= make([]byte, 10)
129 for i
:= 0; i
< 10; i
++ {
132 s
:= "foo" + string(b
)
133 if want
:= "foo0123456789"; s
!= want
{
134 t
.Fatalf("want %v, got %v", want
, s
)
137 // Only string concatenation allocates.
139 t
.Fatalf("want 1 allocation, got %v", n
)
143 func TestTinyAlloc(t
*testing
.T
) {
145 var v
[N
]unsafe
.Pointer
147 v
[i
] = unsafe
.Pointer(new(byte))
150 chunks
:= make(map[uintptr]bool, N
)
151 for _
, p
:= range v
{
152 chunks
[uintptr(p
)&^7] = true
155 if len(chunks
) == N
{
156 t
.Fatal("no bytes allocated within the same 8-byte chunk")
160 var mallocSink
uintptr
162 func BenchmarkMalloc8(b
*testing
.B
) {
164 for i
:= 0; i
< b
.N
; i
++ {
166 x
^= uintptr(unsafe
.Pointer(p
))
171 func BenchmarkMalloc16(b
*testing
.B
) {
173 for i
:= 0; i
< b
.N
; i
++ {
175 x
^= uintptr(unsafe
.Pointer(p
))
180 func BenchmarkMallocTypeInfo8(b
*testing
.B
) {
182 for i
:= 0; i
< b
.N
; i
++ {
184 p
[8 / unsafe
.Sizeof(uintptr(0))]*int
186 x
^= uintptr(unsafe
.Pointer(p
))
191 func BenchmarkMallocTypeInfo16(b
*testing
.B
) {
193 for i
:= 0; i
< b
.N
; i
++ {
195 p
[16 / unsafe
.Sizeof(uintptr(0))]*int
197 x
^= uintptr(unsafe
.Pointer(p
))
202 var n
= flag
.Int("n", 1000, "number of goroutines")
204 func BenchmarkGoroutineSelect(b
*testing
.B
) {
205 quit
:= make(chan struct{})
206 read
:= func(ch
chan struct{}) {
218 benchHelper(b
, *n
, read
)
221 func BenchmarkGoroutineBlocking(b
*testing
.B
) {
222 read
:= func(ch
chan struct{}) {
224 if _
, ok
:= <-ch
; !ok
{
229 benchHelper(b
, *n
, read
)
232 func BenchmarkGoroutineForRange(b
*testing
.B
) {
233 read
:= func(ch
chan struct{}) {
237 benchHelper(b
, *n
, read
)
240 func benchHelper(b
*testing
.B
, n
int, read
func(chan struct{})) {
241 m
:= make([]chan struct{}, n
)
243 m
[i
] = make(chan struct{}, 1)
250 for i
:= 0; i
< b
.N
; i
++ {
251 for _
, ch
:= range m
{
256 time
.Sleep(10 * time
.Millisecond
)
262 for _
, ch
:= range m
{
265 time
.Sleep(10 * time
.Millisecond
)
268 func BenchmarkGoroutineIdle(b
*testing
.B
) {
269 quit
:= make(chan struct{})
273 for i
:= 0; i
< *n
; i
++ {
280 for i
:= 0; i
< b
.N
; i
++ {
286 time
.Sleep(10 * time
.Millisecond
)