// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)
func TestGcSys(t *testing.T) {
	t.Skip("does not test anything; https://golang.org/issue/23343")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}
func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on the heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}
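// escapeSketch is a hypothetical helper (not part of the original tests)
// isolating the idiom TestGcDeepNesting relies on: once a pointer's value is
// observed by a function the compiler cannot see through, such as a
// formatter, escape analysis must assume the pointee escapes, so the
// allocation is forced onto the heap.
func escapeSketch() string {
	p := new(int)               // would be stack-allocated if it did not escape
	return fmt.Sprintf("%p", p) // observing the address defeats stack allocation
}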
func TestGcHashmapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	// Keys this large are stored indirectly (as pointers) in map buckets,
	// which is the code path this test exercises.
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}
// TestGcArraySlice builds a linked list in which each node's nextbuf aliases
// the previous node's array, running a GC at every step, then checks that no
// node was corrupted.
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}
func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}
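// lastGCPause is a hypothetical helper (not in the original file) spelling
// out the ring-buffer indexing used above: MemStats.PauseNs is a circular
// buffer of the 256 most recent GC pause durations, so the latest entry sits
// at index (NumGC+255)%256.
func lastGCPause(ms *runtime.MemStats) uint64 {
	return ms.PauseNs[(ms.NumGC+255)%256]
}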
var hugeSink interface{}
func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can chew these huge types even on
	// the weakest machines. The types are not allocated at runtime.
	if hugeSink != nil {
		// 400MB on 32-bit, 4TB on 64-bit: unsafe.Sizeof(uintptr(0)) is 4 on
		// 32-bit, so the second term vanishes; on 64-bit it is (8-4)<<40 = 4TB.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}
func TestPeriodicGC(t *testing.T) {
	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}
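// gcCountDelta is an illustrative sketch (hypothetical, not in the original
// file) of the measurement pattern TestPeriodicGC uses: MemStats.NumGC is a
// monotonically increasing count of completed GC cycles, so the difference
// between two snapshots is the number of cycles that ran in between.
func gcCountDelta(f func()) uint32 {
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	f()
	runtime.ReadMemStats(&after)
	return after.NumGC - before.NumGC
}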
func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}
type Node1 struct {
	Value       [1]uintptr
	Left, Right *Node1
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *Node8
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *Node64
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}
type Node64Dead struct {
	Left, Right *Node64Dead
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *Node124
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *Node126
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *Node128
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *Node130
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *Node1024
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}
// benchSetType benchmarks setting the heap type metadata for an allocation of
// x's type. SetBytes normalizes throughput: a pointer is scaled by one
// element's size, a slice by the size of its whole backing store.
func benchSetType(b *testing.B, x interface{}) {
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	runtime.BenchSetType(b.N, x)
}
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}
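// parallelWork is an illustrative sketch (hypothetical, not in the original
// file) of the fan-out pattern BenchmarkAllocation uses: n work tokens plus
// one false sentinel per worker are buffered on a channel, so every worker
// loops until it draws a sentinel and then signals completion.
func parallelWork(n, workers int, f func()) {
	work := make(chan bool, n+workers)
	done := make(chan bool)
	for i := 0; i < n; i++ {
		work <- true
	}
	for i := 0; i < workers; i++ {
		work <- false
	}
	for i := 0; i < workers; i++ {
		go func() {
			for <-work {
				f()
			}
			done <- true
		}()
	}
	for i := 0; i < workers; i++ {
		<-done
	}
}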
func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}
func testTypeSwitch(x interface{}) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x interface{}) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x interface{}) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x interface{}) {
	if x == "abc" {
		a = true
	}
}
func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}
func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}
// logDiff recursively walks two values of the same type in parallel and logs
// every leaf field where they differ.
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}
func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	runtime.KeepAlive(x)
}
func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}
func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}
// node is the tree node used by the write barrier benchmarks below.
type node struct {
	l, r *node
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it
			// doesn't accidentally switch to mark 2 when
			// it only has the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 64 MB
	root := mkTree(depth)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i+=2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}
func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*node
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}
		}
	})

	runtime.KeepAlive(ptrs)
}
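// rotateOnce is a minimal sketch (hypothetical, not in the original file) of
// the rotation step in BenchmarkBulkWriteBarrier: a single copy over a slice
// of pointers moves many pointer slots at once, exercising the bulk write
// barrier rather than one barrier per individual store.
func rotateOnce(block []*int) {
	if len(block) == 0 {
		return
	}
	first := block[0]
	copy(block, block[1:])      // bulk pointer copy: one bulk write barrier
	block[len(block)-1] = first // single pointer store: one write barrier
}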