runtime: scan register backing store on ia64
[official-gcc.git] / libgo / go / runtime / malloc_test.go
blob ab580f81800ed4f78782398885d5f13bc451517d
1 // Copyright 2013 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime_test
7 import (
8 "flag"
9 "fmt"
10 "reflect"
11 . "runtime"
12 "testing"
13 "time"
14 "unsafe"
// TestMemStats sanity-checks the values reported by ReadMemStats:
// every exported field is range-checked via reflection, the Sys
// accounting identity is verified, and the PauseNs/PauseEnd circular
// buffers are cross-checked against NumGC and PauseTotalNs.
func TestMemStats(t *testing.T) {
	t.Skip("skipping test with gccgo")

	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	// nz reports an error when the value is the zero value of its type.
	nz := func(x interface{}) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	// le(thresh) reports an error for numeric values >= thresh,
	// catching overflowed or otherwise garbage counters.
	le := func(thresh float64) func(interface{}) error {
		return func(x interface{}) error {
			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	// eq(x) reports an error unless the checked value equals x exactly.
	eq := func(x interface{}) func(interface{}) error {
		return func(y interface{}) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Of the uint fields, HeapReleased, HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(interface{}) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {nz, le(1e10)}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	// Walk MemStats by reflection so a newly added field cannot slip
	// through without an entry in the fields table above.
	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	// Sys must equal the exact sum of the per-subsystem Sys fields.
	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	// PauseEnd is a circular buffer; the most recently written slot is
	// at index (NumGC+len-1) % len.
	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		// The never-used tail of the circular buffers must still be zero.
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		// The buffer has wrapped, so the sum covers only recent pauses.
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}
// TestStringConcatenationAllocs verifies that building "foo"+string(b)
// costs exactly one allocation: the concatenation result itself.
func TestStringConcatenationAllocs(t *testing.T) {
	t.Skip("skipping test with gccgo")
	allocs := testing.AllocsPerRun(1e3, func() {
		digits := make([]byte, 10)
		for j := range digits {
			digits[j] = byte(j) + '0'
		}
		got := "foo" + string(digits)
		if want := "foo0123456789"; got != want {
			t.Fatalf("want %v, got %v", want, got)
		}
	})
	// Only string concatenation allocates.
	if allocs != 1 {
		t.Fatalf("want 1 allocation, got %v", allocs)
	}
}
// TestTinyAlloc checks that the tiny allocator packs some of a batch of
// byte-sized allocations into shared 8-byte chunks.
func TestTinyAlloc(t *testing.T) {
	const count = 16
	var ptrs [count]unsafe.Pointer
	for i := range ptrs {
		ptrs[i] = unsafe.Pointer(new(byte))
	}

	// Round each pointer down to its 8-byte chunk and count distinct chunks.
	chunkSet := make(map[uintptr]bool, count)
	for _, p := range ptrs {
		chunkSet[uintptr(p)&^7] = true
	}

	// If every allocation landed in its own chunk, nothing was packed.
	if len(chunkSet) == count {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}
157 var mallocSink uintptr
159 func BenchmarkMalloc8(b *testing.B) {
160 var x uintptr
161 for i := 0; i < b.N; i++ {
162 p := new(int64)
163 x ^= uintptr(unsafe.Pointer(p))
165 mallocSink = x
168 func BenchmarkMalloc16(b *testing.B) {
169 var x uintptr
170 for i := 0; i < b.N; i++ {
171 p := new([2]int64)
172 x ^= uintptr(unsafe.Pointer(p))
174 mallocSink = x
177 func BenchmarkMallocTypeInfo8(b *testing.B) {
178 var x uintptr
179 for i := 0; i < b.N; i++ {
180 p := new(struct {
181 p [8 / unsafe.Sizeof(uintptr(0))]*int
183 x ^= uintptr(unsafe.Pointer(p))
185 mallocSink = x
188 func BenchmarkMallocTypeInfo16(b *testing.B) {
189 var x uintptr
190 for i := 0; i < b.N; i++ {
191 p := new(struct {
192 p [16 / unsafe.Sizeof(uintptr(0))]*int
194 x ^= uintptr(unsafe.Pointer(p))
196 mallocSink = x
199 var n = flag.Int("n", 1000, "number of goroutines")
201 func BenchmarkGoroutineSelect(b *testing.B) {
202 quit := make(chan struct{})
203 read := func(ch chan struct{}) {
204 for {
205 select {
206 case _, ok := <-ch:
207 if !ok {
208 return
210 case <-quit:
211 return
215 benchHelper(b, *n, read)
218 func BenchmarkGoroutineBlocking(b *testing.B) {
219 read := func(ch chan struct{}) {
220 for {
221 if _, ok := <-ch; !ok {
222 return
226 benchHelper(b, *n, read)
229 func BenchmarkGoroutineForRange(b *testing.B) {
230 read := func(ch chan struct{}) {
231 for _ = range ch {
234 benchHelper(b, *n, read)
// benchHelper spawns n reader goroutines, each draining its own buffered
// channel via read, and then times GC while those goroutines alternate
// between runnable and parked.
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	chans := make([]chan struct{}, n)
	for idx := range chans {
		chans[idx] = make(chan struct{}, 1)
		go read(chans[idx])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for iter := 0; iter < b.N; iter++ {
		// Wake every reader, give them time to park again, then time
		// a single GC with the timer running.
		for _, c := range chans {
			if c != nil {
				c <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	// Make sure the goroutines don't leave zombies around.
	for _, c := range chans {
		close(c)
	}
	time.Sleep(10 * time.Millisecond)
}
265 func BenchmarkGoroutineIdle(b *testing.B) {
266 quit := make(chan struct{})
267 fn := func() {
268 <-quit
270 for i := 0; i < *n; i++ {
271 go fn()
274 GC()
275 b.ResetTimer()
277 for i := 0; i < b.N; i++ {
278 GC()
281 b.StopTimer()
282 close(quit)
283 time.Sleep(10 * time.Millisecond)