// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"io"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"testing"
	"time"
	"unsafe"
)
func TestGcSys(t *testing.T) {
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}
func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on the heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}
func TestGcHashmapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	// [256]int keys are too large to be stored inline in the map buckets,
	// so the map keeps them behind pointers that the GC must follow.
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		c := T{}
		c.a[0] = i
		m[c] = c
	}
}
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}
func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on Windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}
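// lastGCPauses is an illustrative helper, not part of the original test
// file: it pulls up to the n most recent pause times out of the
// MemStats.PauseNs circular buffer using the same (NumGC+255)%256
// indexing that TestGcLastTime relies on, newest first.
func lastGCPauses(n int) []uint64 {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	if n > 256 {
		n = 256 // the buffer only keeps the last 256 pauses
	}
	if uint32(n) > ms.NumGC {
		n = int(ms.NumGC) // fewer GCs have happened than requested
	}
	pauses := make([]uint64, 0, n)
	for i := uint32(0); i < uint32(n); i++ {
		pauses = append(pauses, ms.PauseNs[(ms.NumGC+255-i)%256])
	}
	return pauses
}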
var hugeSink interface{}

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can chew these huge types even on the weakest machines.
	// The types are not allocated at runtime.
	if hugeSink != nil {
		// 400MB on 32-bit systems, 4TB on 64-bit systems.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}
func TestPeriodicGC(t *testing.T) {
	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 20 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}
func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}
type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}

type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}
func benchSetType(b *testing.B, x interface{}) {
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	// BenchSetType is exported for testing in export_test.go; it writes
	// the heap type bitmap for x's type b.N times.
	runtime.BenchSetType(b.N, x)
}
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}
func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}
// The implicit y, ok := x.(error) for the case error
// in testTypeSwitch used to not initialize the result y
// before passing &y to assertE2I2GC.
// Catch this by making assertE2I2 call runtime.GC,
// which will force a stack scan and failure if there are
// bad pointers, and then fill the stack with bad pointers
// and run the type switch.
func TestAssertE2I2Liveness(t *testing.T) {
	// Note that this flag is defined in export_test.go
	// and is not available to ordinary imports of runtime.
	*runtime.TestingAssertE2I2GC = true
	defer func() {
		*runtime.TestingAssertE2I2GC = false
	}()

	poisonStack()
	testTypeSwitch(io.EOF)
	poisonStack()
	testAssert(io.EOF)
	poisonStack()
	testAssertVar(io.EOF)
}
func poisonStack() uintptr {
	var x [1000]uintptr
	for i := range x {
		x[i] = 0xff
	}
	return x[123]
}
func testTypeSwitch(x interface{}) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}
func testAssert(x interface{}) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}
func testAssertVar(x interface{}) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}
func TestAssertE2T2Liveness(t *testing.T) {
	*runtime.TestingAssertE2T2GC = true
	defer func() {
		*runtime.TestingAssertE2T2GC = false
	}()

	poisonStack()
	testIfaceEqual(io.EOF)
}
var a bool

func testIfaceEqual(x interface{}) {
	if x == "abc" {
		a = true
	}
}
func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}