libgo/go/runtime/malloc_test.go
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"flag"
	. "runtime"
	"testing"
	"time"
	"unsafe"
)

func TestMemStats(t *testing.T) {
	t.Skip("skipping test with gccgo")
	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	// Everything except HeapReleased, HeapIdle, and NumGC,
	// because they indeed can be 0.
	if st.Alloc == 0 || st.TotalAlloc == 0 || st.Sys == 0 || st.Lookups == 0 ||
		st.Mallocs == 0 || st.Frees == 0 || st.HeapAlloc == 0 || st.HeapSys == 0 ||
		st.HeapInuse == 0 || st.HeapObjects == 0 || st.StackInuse == 0 ||
		st.StackSys == 0 || st.MSpanInuse == 0 || st.MSpanSys == 0 || st.MCacheInuse == 0 ||
		st.MCacheSys == 0 || st.BuckHashSys == 0 || st.GCSys == 0 || st.OtherSys == 0 ||
		st.NextGC == 0 {
		t.Fatalf("Zero value: %+v", *st)
	}

	if st.Alloc > 1e10 || st.TotalAlloc > 1e11 || st.Sys > 1e10 || st.Lookups > 1e10 ||
		st.Mallocs > 1e10 || st.Frees > 1e10 || st.HeapAlloc > 1e10 || st.HeapSys > 1e10 ||
		st.HeapIdle > 1e10 || st.HeapInuse > 1e10 || st.HeapObjects > 1e10 || st.StackInuse > 1e10 ||
		st.StackSys > 1e10 || st.MSpanInuse > 1e10 || st.MSpanSys > 1e10 || st.MCacheInuse > 1e10 ||
		st.MCacheSys > 1e10 || st.BuckHashSys > 1e10 || st.GCSys > 1e10 || st.OtherSys > 1e10 ||
		st.NextGC > 1e10 || st.NumGC > 1e9 || st.PauseTotalNs > 1e11 {
		t.Fatalf("Insanely high value (overflow?): %+v", *st)
	}

	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}
}

func TestStringConcatenationAllocs(t *testing.T) {
	t.Skip("skipping test with gccgo")
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// Only string concatenation allocates.
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}

func TestTinyAlloc(t *testing.T) {
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}

var mallocSink uintptr

func BenchmarkMalloc8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMalloc16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new([2]int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

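// The MallocTypeInfo variants allocate structs that contain pointers, so the
// allocator must also record pointer/type information for the garbage
// collector; comparing them with Malloc8 and Malloc16 makes that extra
// bookkeeping visible.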
func BenchmarkMallocTypeInfo8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [8 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMallocTypeInfo16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [16 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

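// The -n flag sets how many goroutines the Goroutine* benchmarks below keep
// alive while the garbage collector runs.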
var n = flag.Int("n", 1000, "number of goroutines")

func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineBlocking(b *testing.B) {
	read := func(ch chan struct{}) {
		for {
			if _, ok := <-ch; !ok {
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}

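// benchHelper starts n goroutines running the supplied read loop, one per
// buffered channel, and then times only the GC() calls: the timer is stopped
// around the channel sends and sleeps, so the benchmark measures garbage
// collection with n goroutines blocked in that read loop.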
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}

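// BenchmarkGoroutineIdle times GC() while *n goroutines sit parked on a
// single channel receive, doing no other work.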
func BenchmarkGoroutineIdle(b *testing.B) {
	quit := make(chan struct{})
	fn := func() {
		<-quit
	}
	for i := 0; i < *n; i++ {
		go fn()
	}

	GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		GC()
	}

	b.StopTimer()
	close(quit)
	time.Sleep(10 * time.Millisecond)
}