1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
// stop is a buffered one-slot flag channel; presumably polled by the
// perpetuumMobile goroutine(s) below to know when to halt — confirm
// against the (not fully visible) perpetuumMobile body.
var stop = make(chan bool, 1)
18 func perpetuumMobile() {
26 func TestStopTheWorldDeadlock(t
*testing
.T
) {
28 t
.Skip("skipping during short test")
30 maxprocs
:= runtime
.GOMAXPROCS(3)
31 compl
:= make(chan bool, 2)
33 for i
:= 0; i
!= 1000; i
+= 1 {
39 for i
:= 0; i
!= 1000; i
+= 1 {
48 runtime
.GOMAXPROCS(maxprocs
)
51 func TestYieldProgress(t
*testing
.T
) {
52 testYieldProgress(t
, false)
55 func TestYieldLockedProgress(t
*testing
.T
) {
56 testYieldProgress(t
, true)
59 func testYieldProgress(t
*testing
.T
, locked
bool) {
61 cack
:= make(chan bool)
64 runtime
.LockOSThread()
76 time
.Sleep(10 * time
.Millisecond
)
81 func TestYieldLocked(t
*testing
.T
) {
85 runtime
.LockOSThread()
86 for i
:= 0; i
< N
; i
++ {
88 time
.Sleep(time
.Millisecond
)
91 // runtime.UnlockOSThread() is deliberately omitted
96 func TestGoroutineParallelism(t
*testing
.T
) {
103 defer runtime
.GOMAXPROCS(runtime
.GOMAXPROCS(P
))
104 for try
:= 0; try
< N
; try
++ {
105 done
:= make(chan bool)
107 for p
:= 0; p
< P
; p
++ {
108 // Test that all P goroutines are scheduled at the same time
110 for i
:= 0; i
< 3; i
++ {
111 expected
:= uint32(P
*i
+ p
)
112 for atomic
.LoadUint32(&x
) != expected
{
114 atomic
.StoreUint32(&x
, expected
+1)
119 for p
:= 0; p
< P
; p
++ {
125 func TestBlockLocked(t
*testing
.T
) {
129 runtime
.LockOSThread()
130 for i
:= 0; i
< N
; i
++ {
133 runtime
.UnlockOSThread()
135 for i
:= 0; i
< N
; i
++ {
140 func TestTimerFairness(t
*testing
.T
) {
141 done
:= make(chan bool)
143 for i
:= 0; i
< 2; i
++ {
155 timer
:= time
.After(20 * time
.Millisecond
)
166 func TestTimerFairness2(t
*testing
.T
) {
167 done
:= make(chan bool)
169 for i
:= 0; i
< 2; i
++ {
171 timer
:= time
.After(20 * time
.Millisecond
)
174 syscall
.Read(0, buf
[0:0])
189 // The function is used to test preemption at split stack checks.
190 // Declaring a var avoids inlining at the call site.
191 var preempt
= func() int {
194 for _
, v
:= range a
{
200 func TestPreemption(t
*testing
.T
) {
201 t
.Skip("gccgo does not implement preemption")
202 // Test that goroutines are preempted at function calls.
209 for g
:= 0; g
< 2; g
++ {
211 for i
:= 0; i
< N
; i
++ {
212 for atomic
.LoadUint32(&x
) != uint32(g
) {
215 atomic
.StoreUint32(&x
, uint32(1-g
))
224 func TestPreemptionGC(t
*testing
.T
) {
225 t
.Skip("gccgo does not implement preemption")
226 // Test that pending GC preempts running goroutines.
233 defer runtime
.GOMAXPROCS(runtime
.GOMAXPROCS(P
+ 1))
235 for i
:= 0; i
< P
; i
++ {
237 for atomic
.LoadUint32(&stop
) == 0 {
242 for i
:= 0; i
< N
; i
++ {
246 atomic
.StoreUint32(&stop
, 1)
249 func TestGCFairness(t
*testing
.T
) {
250 output
:= executeTest(t
, testGCFairnessSource
, nil)
253 t
.Fatalf("want %s, got %s\n", want
, output
)
257 const testGCFairnessSource
= `
268 runtime.GOMAXPROCS(1)
269 f, err := os.Open("/dev/null")
270 if os.IsNotExist(err) {
271 // This test tests what it is intended to test only if writes are fast.
272 // If there is no /dev/null, we just don't execute the test.
280 for i := 0; i < 2; i++ {
287 time.Sleep(10 * time.Millisecond)
292 func stackGrowthRecursive(i
int) {
294 if i
!= 0 && pad
[0] == 0 {
295 stackGrowthRecursive(i
- 1)
299 func TestPreemptSplitBig(t
*testing
.T
) {
301 t
.Skip("skipping in -short mode")
303 t
.Skip("gccgo does not implement preemption")
304 defer runtime
.GOMAXPROCS(runtime
.GOMAXPROCS(2))
305 stop
:= make(chan int)
307 for i
:= 0; i
< 3; i
++ {
308 time
.Sleep(10 * time
.Microsecond
) // let big start running
314 func big(stop
chan int) int {
317 // delay so that gc is sure to have asked for a preemption
318 for i
:= 0; i
< 1e9
; i
++ {
322 // call bigframe, which used to miss the preemption in its prologue.
325 // check if we've been asked to stop.
333 func bigframe(stop
chan int) int {
334 // not splitting the stack will overflow.
335 // small will notice that it needs a stack split and will
336 // catch the overflow.
338 return small(stop
, &x
)
341 func small(stop
chan int, x
*[8192]byte) int {
350 // keep small from being a leaf function, which might
351 // make it not do any stack check at all.
357 func nonleaf(stop
chan int) bool {
358 // do something that won't be inlined:
367 func TestSchedLocalQueue(t
*testing
.T
) {
368 runtime
.TestSchedLocalQueue1()
371 func TestSchedLocalQueueSteal(t
*testing
.T
) {
372 runtime
.TestSchedLocalQueueSteal1()
375 func benchmarkStackGrowth(b
*testing
.B
, rec
int) {
376 b
.RunParallel(func(pb
*testing
.PB
) {
378 stackGrowthRecursive(rec
)
383 func BenchmarkStackGrowth(b
*testing
.B
) {
384 benchmarkStackGrowth(b
, 10)
387 func BenchmarkStackGrowthDeep(b
*testing
.B
) {
388 benchmarkStackGrowth(b
, 1024)
391 func BenchmarkCreateGoroutines(b
*testing
.B
) {
392 benchmarkCreateGoroutines(b
, 1)
395 func BenchmarkCreateGoroutinesParallel(b
*testing
.B
) {
396 benchmarkCreateGoroutines(b
, runtime
.GOMAXPROCS(-1))
399 func benchmarkCreateGoroutines(b
*testing
.B
, procs
int) {
409 for i
:= 0; i
< procs
; i
++ {
412 for i
:= 0; i
< procs
; i
++ {
// Matrix is a dense matrix stored as a slice of row slices
// (element (i, j) is m[i][j]).
type Matrix [][]float64
419 func BenchmarkMatmult(b
*testing
.B
) {
421 // matmult is O(N**3) but testing expects O(b.N),
422 // so we need to take cube root of b.N
423 n
:= int(math
.Cbrt(float64(b
.N
))) + 1
428 matmult(nil, A
, B
, C
, 0, n
, 0, n
, 0, n
, 8)
431 func makeMatrix(n
int) Matrix
{
433 for i
:= 0; i
< n
; i
++ {
434 m
[i
] = make([]float64, n
)
435 for j
:= 0; j
< n
; j
++ {
436 m
[i
][j
] = float64(i
*n
+ j
)
442 func matmult(done
chan<- struct{}, A
, B
, C Matrix
, i0
, i1
, j0
, j1
, k0
, k1
, threshold
int) {
446 if di
>= dj
&& di
>= dk
&& di
>= threshold
{
447 // divide in two by y axis
449 done1
:= make(chan struct{}, 1)
450 go matmult(done1
, A
, B
, C
, i0
, mi
, j0
, j1
, k0
, k1
, threshold
)
451 matmult(nil, A
, B
, C
, mi
, i1
, j0
, j1
, k0
, k1
, threshold
)
453 } else if dj
>= dk
&& dj
>= threshold
{
454 // divide in two by x axis
456 done1
:= make(chan struct{}, 1)
457 go matmult(done1
, A
, B
, C
, i0
, i1
, j0
, mj
, k0
, k1
, threshold
)
458 matmult(nil, A
, B
, C
, i0
, i1
, mj
, j1
, k0
, k1
, threshold
)
460 } else if dk
>= threshold
{
461 // divide in two by "k" axis
462 // deliberately not parallel because of data races
464 matmult(nil, A
, B
, C
, i0
, i1
, j0
, j1
, k0
, mk
, threshold
)
465 matmult(nil, A
, B
, C
, i0
, i1
, j0
, j1
, mk
, k1
, threshold
)
467 // the matrices are small enough, compute directly
468 for i
:= i0
; i
< i1
; i
++ {
469 for j
:= j0
; j
< j1
; j
++ {
470 for k
:= k0
; k
< k1
; k
++ {
471 C
[i
][j
] += A
[i
][k
] * B
[k
][j
]