1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
18 func TestGcSys(t
*testing
.T
) {
19 if os
.Getenv("GOGC") == "off" {
20 t
.Skip("skipping test; GOGC=off in environment")
22 got
:= runTestProg(t
, "testprog", "GCSys")
25 t
.Fatalf("expected %q, but got %q", want
, got
)
29 func TestGcDeepNesting(t
*testing
.T
) {
30 type T
[2][2][2][2][2][2][2][2][2][2]*int
33 // Prevent the compiler from applying escape analysis.
34 // This makes sure new(T) is allocated on heap, not on the stack.
37 a
[0][0][0][0][0][0][0][0][0][0] = new(int)
38 *a
[0][0][0][0][0][0][0][0][0][0] = 13
40 if *a
[0][0][0][0][0][0][0][0][0][0] != 13 {
45 func TestGcHashmapIndirection(t
*testing
.T
) {
46 defer debug
.SetGCPercent(debug
.SetGCPercent(1))
52 for i
:= 0; i
< 2000; i
++ {
59 func TestGcArraySlice(t
*testing
.T
) {
66 for i
:= 0; i
< 10; i
++ {
71 p
.nextbuf
= head
.buf
[:]
76 for p
:= head
; p
!= nil; p
= p
.next
{
78 t
.Fatal("corrupted heap")
83 func TestGcRescan(t
*testing
.T
) {
94 for i
:= 0; i
< 10; i
++ {
96 p
.c
= make(chan error
)
106 for p
:= head
; p
!= nil; p
= p
.nexty
{
108 t
.Fatal("corrupted heap")
113 func TestGcLastTime(t
*testing
.T
) {
114 ms
:= new(runtime
.MemStats
)
115 t0
:= time
.Now().UnixNano()
117 t1
:= time
.Now().UnixNano()
118 runtime
.ReadMemStats(ms
)
119 last
:= int64(ms
.LastGC
)
120 if t0
> last || last
> t1
{
121 t
.Fatalf("bad last GC time: got %v, want [%v, %v]", last
, t0
, t1
)
123 pause
:= ms
.PauseNs
[(ms
.NumGC
+255)%256
]
124 // Due to timer granularity, pause can actually be 0 on windows
125 // or on virtualized environments.
127 t
.Logf("last GC pause was 0")
128 } else if pause
> 10e9
{
129 t
.Logf("bad last GC pause: got %v, want [0, 10e9]", pause
)
// hugeSink is a package-level sink variable; TestHugeGCInfo assigns
// huge allocations to it so the compiler must fully process their types.
var hugeSink interface{}
135 func TestHugeGCInfo(t
*testing
.T
) {
136 // The test ensures that compiler can chew these huge types even on weakest machines.
137 // The types are not allocated at runtime.
139 // 400MB on 32 bits, 4TB on 64-bits.
140 const n
= (400 << 20) + (unsafe
.Sizeof(uintptr(0))-4)<<40
141 hugeSink
= new([n
]*byte)
142 hugeSink
= new([n
]uintptr)
143 hugeSink
= new(struct {
148 hugeSink
= new(struct {
157 func TestPeriodicGC(t *testing.T) {
158 // Make sure we're not in the middle of a GC.
161 var ms1, ms2 runtime.MemStats
162 runtime.ReadMemStats(&ms1)
164 // Make periodic GC run continuously.
165 orig := *runtime.ForceGCPeriod
166 *runtime.ForceGCPeriod = 0
168 // Let some periodic GCs happen. In a heavily loaded system,
169 // it's possible these will be delayed, so this is designed to
170 // succeed quickly if things are working, but to give it some
171 // slack if things are slow.
174 for i := 0; i < 20 && numGCs < want; i++ {
175 time.Sleep(5 * time.Millisecond)
177 // Test that periodic GC actually happened.
178 runtime.ReadMemStats(&ms2)
179 numGCs = ms2.NumGC - ms1.NumGC
181 *runtime.ForceGCPeriod = orig
184 t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
189 func BenchmarkSetTypePtr(b
*testing
.B
) {
190 benchSetType(b
, new(*byte))
193 func BenchmarkSetTypePtr8(b
*testing
.B
) {
194 benchSetType(b
, new([8]*byte))
197 func BenchmarkSetTypePtr16(b
*testing
.B
) {
198 benchSetType(b
, new([16]*byte))
201 func BenchmarkSetTypePtr32(b
*testing
.B
) {
202 benchSetType(b
, new([32]*byte))
205 func BenchmarkSetTypePtr64(b
*testing
.B
) {
206 benchSetType(b
, new([64]*byte))
209 func BenchmarkSetTypePtr126(b
*testing
.B
) {
210 benchSetType(b
, new([126]*byte))
213 func BenchmarkSetTypePtr128(b
*testing
.B
) {
214 benchSetType(b
, new([128]*byte))
217 func BenchmarkSetTypePtrSlice(b
*testing
.B
) {
218 benchSetType(b
, make([]*byte, 1<<10))
226 func BenchmarkSetTypeNode1(b
*testing
.B
) {
227 benchSetType(b
, new(Node1
))
230 func BenchmarkSetTypeNode1Slice(b
*testing
.B
) {
231 benchSetType(b
, make([]Node1
, 32))
239 func BenchmarkSetTypeNode8(b
*testing
.B
) {
240 benchSetType(b
, new(Node8
))
243 func BenchmarkSetTypeNode8Slice(b
*testing
.B
) {
244 benchSetType(b
, make([]Node8
, 32))
252 func BenchmarkSetTypeNode64(b
*testing
.B
) {
253 benchSetType(b
, new(Node64
))
256 func BenchmarkSetTypeNode64Slice(b
*testing
.B
) {
257 benchSetType(b
, make([]Node64
, 32))
260 type Node64Dead
struct {
265 func BenchmarkSetTypeNode64Dead(b
*testing
.B
) {
266 benchSetType(b
, new(Node64Dead
))
269 func BenchmarkSetTypeNode64DeadSlice(b
*testing
.B
) {
270 benchSetType(b
, make([]Node64Dead
, 32))
273 type Node124
struct {
278 func BenchmarkSetTypeNode124(b
*testing
.B
) {
279 benchSetType(b
, new(Node124
))
282 func BenchmarkSetTypeNode124Slice(b
*testing
.B
) {
283 benchSetType(b
, make([]Node124
, 32))
286 type Node126
struct {
291 func BenchmarkSetTypeNode126(b
*testing
.B
) {
292 benchSetType(b
, new(Node126
))
295 func BenchmarkSetTypeNode126Slice(b
*testing
.B
) {
296 benchSetType(b
, make([]Node126
, 32))
299 type Node128
struct {
304 func BenchmarkSetTypeNode128(b
*testing
.B
) {
305 benchSetType(b
, new(Node128
))
308 func BenchmarkSetTypeNode128Slice(b
*testing
.B
) {
309 benchSetType(b
, make([]Node128
, 32))
312 type Node130
struct {
317 func BenchmarkSetTypeNode130(b
*testing
.B
) {
318 benchSetType(b
, new(Node130
))
321 func BenchmarkSetTypeNode130Slice(b
*testing
.B
) {
322 benchSetType(b
, make([]Node130
, 32))
325 type Node1024
struct {
330 func BenchmarkSetTypeNode1024(b
*testing
.B
) {
331 benchSetType(b
, new(Node1024
))
334 func BenchmarkSetTypeNode1024Slice(b
*testing
.B
) {
335 benchSetType(b
, make([]Node1024
, 32))
338 func benchSetType(b
*testing
.B
, x
interface{}) {
339 v
:= reflect
.ValueOf(x
)
343 b
.SetBytes(int64(t
.Elem().Size()))
345 b
.SetBytes(int64(t
.Elem().Size()) * int64(v
.Len()))
348 //runtime.BenchSetType(b.N, x)
351 func BenchmarkAllocation(b
*testing
.B
) {
355 ngo
:= runtime
.GOMAXPROCS(0)
356 work
:= make(chan bool, b
.N
+ngo
)
357 result
:= make(chan *T
)
358 for i
:= 0; i
< b
.N
; i
++ {
361 for i
:= 0; i
< ngo
; i
++ {
364 for i
:= 0; i
< ngo
; i
++ {
368 for i
:= 0; i
< 1000; i
++ {
375 for i
:= 0; i
< ngo
; i
++ {
380 func TestPrintGC(t
*testing
.T
) {
382 t
.Skip("Skipping in short mode")
384 defer runtime
.GOMAXPROCS(runtime
.GOMAXPROCS(2))
385 done
:= make(chan bool)
396 for i
:= 0; i
< 1e4
; i
++ {
404 func testTypeSwitch(x
interface{}) error
{
405 switch y
:= x
.(type) {
414 func testAssert(x
interface{}) error
{
415 if y
, ok
:= x
.(error
); ok
{
421 func testAssertVar(x
interface{}) error
{
422 var y
, ok
= x
.(error
)
432 func testIfaceEqual(x
interface{}) {
438 func TestPageAccounting(t
*testing
.T
) {
439 // Grow the heap in small increments. This used to drop the
440 // pages-in-use count below zero because of a rounding
441 // mismatch (golang.org/issue/15022).
442 const blockSize
= 64 << 10
443 blocks
:= make([]*[blockSize
]byte, (64<<20)/blockSize
)
444 for i
:= range blocks
{
445 blocks
[i
] = new([blockSize
]byte)
448 // Check that the running page count matches reality.
449 pagesInUse
, counted
:= runtime
.CountPagesInUse()
450 if pagesInUse
!= counted
{
451 t
.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse
, counted
)
455 func TestReadMemStats(t
*testing
.T
) {
456 base
, slow
:= runtime
.ReadMemStatsSlow()
458 logDiff(t
, "MemStats", reflect
.ValueOf(base
), reflect
.ValueOf(slow
))
459 t
.Fatal("memstats mismatch")
463 func logDiff(t
*testing
.T
, prefix
string, got
, want reflect
.Value
) {
466 case reflect
.Array
, reflect
.Slice
:
467 if got
.Len() != want
.Len() {
468 t
.Logf("len(%s): got %v, want %v", prefix
, got
, want
)
471 for i
:= 0; i
< got
.Len(); i
++ {
472 logDiff(t
, fmt
.Sprintf("%s[%d]", prefix
, i
), got
.Index(i
), want
.Index(i
))
475 for i
:= 0; i
< typ
.NumField(); i
++ {
476 gf
, wf
:= got
.Field(i
), want
.Field(i
)
477 logDiff(t
, prefix
+"."+typ
.Field(i
).Name
, gf
, wf
)
480 t
.Fatal("not implemented: logDiff for map")
482 if got
.Interface() != want
.Interface() {
483 t
.Logf("%s: got %v, want %v", prefix
, got
, want
)
488 func BenchmarkReadMemStats(b
*testing
.B
) {
489 var ms runtime
.MemStats
490 const heapSize
= 100 << 20
491 x
:= make([]*[1024]byte, heapSize
/1024)
493 x
[i
] = new([1024]byte)
498 for i
:= 0; i
< b
.N
; i
++ {
499 runtime
.ReadMemStats(&ms
)