1 // Copyright 2013 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
// TestMemStats checks that ReadMemStats reports internally consistent
// and plausible values for every MemStats field.
//
// NOTE(review): this block was a truncated paste; the missing statements
// (GC/ReadMemStats setup, the "return nil" arms of the checker closures,
// the BySize map entry, and several closing braces) have been restored to
// match the upstream runtime test this file derives from — confirm
// against the original libgo source.
func TestMemStats(t *testing.T) {
	t.Skip("skipping test with gccgo")
	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	// nz reports an error when a field holds its zero value.
	nz := func(x interface{}) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	// le returns a checker asserting a numeric field is below thresh.
	le := func(thresh float64) func(interface{}) error {
		return func(x interface{}) error {
			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	// eq returns a checker asserting a field equals x exactly.
	eq := func(x interface{}) func(interface{}) error {
		return func(y interface{}) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Of the uint fields, HeapReleased, HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(interface{}) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {nz, le(1e10)}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	// Walk every MemStats field by reflection and apply its checkers,
	// so a newly added field without an entry above is flagged.
	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	// PauseEnd is a circular buffer; (NumGC+255)%len indexes the most
	// recent entry, which must agree with LastGC.
	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		// Slots beyond NumGC have never been written and must be zero.
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		// The buffer has wrapped, so the sum is only a lower bound.
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}
// TestStringConcatenationAllocs verifies that building "foo"+string(b)
// costs exactly one allocation: the concatenation result itself. The
// []byte b must stay on the stack and the conversion must not allocate
// separately.
//
// NOTE(review): truncated paste; the digit-fill loop body and the final
// n != 1 guard were restored from the upstream runtime test.
func TestStringConcatenationAllocs(t *testing.T) {
	t.Skip("skipping test with gccgo")
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = '0' + byte(i)
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// Only string concatenation allocates.
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}
// TestTinyAlloc checks that the runtime's tiny allocator packs several
// 1-byte allocations into shared 8-byte chunks rather than giving each
// its own chunk.
func TestTinyAlloc(t *testing.T) {
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

	// Round each pointer down to its 8-byte chunk; if all N pointers
	// landed in distinct chunks, no packing happened.
	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}
// mallocSink receives each benchmark's accumulated pointer bits so the
// compiler cannot prove the allocations dead and optimize them away.
var mallocSink uintptr
159 func BenchmarkMalloc8(b
*testing
.B
) {
161 for i
:= 0; i
< b
.N
; i
++ {
163 x
^= uintptr(unsafe
.Pointer(p
))
168 func BenchmarkMalloc16(b
*testing
.B
) {
170 for i
:= 0; i
< b
.N
; i
++ {
172 x
^= uintptr(unsafe
.Pointer(p
))
177 func BenchmarkMallocTypeInfo8(b
*testing
.B
) {
179 for i
:= 0; i
< b
.N
; i
++ {
181 p
[8 / unsafe
.Sizeof(uintptr(0))]*int
183 x
^= uintptr(unsafe
.Pointer(p
))
188 func BenchmarkMallocTypeInfo16(b
*testing
.B
) {
190 for i
:= 0; i
< b
.N
; i
++ {
192 p
[16 / unsafe
.Sizeof(uintptr(0))]*int
194 x
^= uintptr(unsafe
.Pointer(p
))
// n is the number of goroutines the BenchmarkGoroutine* benchmarks
// create; override with -n on the test command line.
var n = flag.Int("n", 1000, "number of goroutines")
201 func BenchmarkGoroutineSelect(b
*testing
.B
) {
202 quit
:= make(chan struct{})
203 read
:= func(ch
chan struct{}) {
215 benchHelper(b
, *n
, read
)
218 func BenchmarkGoroutineBlocking(b
*testing
.B
) {
219 read
:= func(ch
chan struct{}) {
221 if _
, ok
:= <-ch
; !ok
{
226 benchHelper(b
, *n
, read
)
229 func BenchmarkGoroutineForRange(b
*testing
.B
) {
230 read
:= func(ch
chan struct{}) {
234 benchHelper(b
, *n
, read
)
// benchHelper drives the BenchmarkGoroutine* benchmarks: it spawns n
// reader goroutines (one buffered channel each), then per iteration
// wakes every reader and times only the GC() call, so the benchmark
// reports GC cost in the presence of n goroutines.
//
// NOTE(review): timer-control and teardown lines were missing from the
// truncated paste; restored to match the upstream runtime test.
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		// Wake every reader, then give the goroutines a chance to run
		// and block again before timing the collection.
		for _, ch := range m {
			ch <- struct{}{}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	// Teardown: closing the channels makes every reader return.
	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}
265 func BenchmarkGoroutineIdle(b
*testing
.B
) {
266 quit
:= make(chan struct{})
270 for i
:= 0; i
< *n
; i
++ {
277 for i
:= 0; i
< b
.N
; i
++ {
283 time
.Sleep(10 * time
.Millisecond
)