1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // GOMAXPROCS=10 go test
21 func HammerSemaphore(s
*uint32, loops
int, cdone
chan bool) {
22 for i
:= 0; i
< loops
; i
++ {
24 Runtime_Semrelease(s
, false, 0)
29 func TestSemaphore(t
*testing
.T
) {
33 for i
:= 0; i
< 10; i
++ {
34 go HammerSemaphore(s
, 1000, c
)
36 for i
:= 0; i
< 10; i
++ {
41 func BenchmarkUncontendedSemaphore(b
*testing
.B
) {
44 HammerSemaphore(s
, b
.N
, make(chan bool, 2))
47 func BenchmarkContendedSemaphore(b
*testing
.B
) {
52 defer runtime
.GOMAXPROCS(runtime
.GOMAXPROCS(2))
55 go HammerSemaphore(s
, b
.N
/2, c
)
56 go HammerSemaphore(s
, b
.N
/2, c
)
61 func HammerMutex(m
*Mutex
, loops
int, cdone
chan bool) {
62 for i
:= 0; i
< loops
; i
++ {
75 func TestMutex(t
*testing
.T
) {
76 if n
:= runtime
.SetMutexProfileFraction(1); n
!= 0 {
77 t
.Logf("got mutexrate %d expected 0", n
)
79 defer runtime
.SetMutexProfileFraction(0)
85 t
.Fatalf("TryLock succeeded with mutex locked")
89 t
.Fatalf("TryLock failed with mutex unlocked")
94 for i
:= 0; i
< 10; i
++ {
95 go HammerMutex(m
, 1000, c
)
97 for i
:= 0; i
< 10; i
++ {
// misuseTests is the table driving TestMutexMisuse: each entry names a
// deliberate lock-misuse scenario executed in a subprocess (see the
// TESTMISUSE dispatch below).
// NOTE(review): the struct fields and all table entries are missing from
// this chunk — truncated by extraction; restore from the original file.
102 var misuseTests
= []struct {
173 if len(os
.Args
) == 3 && os
.Args
[1] == "TESTMISUSE" {
174 for _
, test
:= range misuseTests
{
175 if test
.name
== os
.Args
[2] {
177 defer func() { recover() }()
180 fmt
.Printf("test completed\n")
184 fmt
.Printf("unknown test\n")
189 func TestMutexMisuse(t
*testing
.T
) {
190 testenv
.MustHaveExec(t
)
191 for _
, test
:= range misuseTests
{
192 out
, err
:= exec
.Command(os
.Args
[0], "TESTMISUSE", test
.name
).CombinedOutput()
193 if err
== nil ||
!strings
.Contains(string(out
), "unlocked") {
194 t
.Errorf("%s: did not find failure with message about unlocked lock: %s\n%s\n", test
.name
, err
, out
)
// TestMutexFairness checks that a goroutine that briefly wants the mutex
// is not starved by another goroutine that holds it almost continuously:
// the light user must get through 10 acquisitions within 10 seconds.
func TestMutexFairness(t *testing.T) {
	var mu Mutex
	stop := make(chan bool)
	defer close(stop)
	// Greedy goroutine: holds the mutex ~100µs at a time, in a tight loop.
	go func() {
		for {
			mu.Lock()
			time.Sleep(100 * time.Microsecond)
			mu.Unlock()
			select {
			case <-stop:
				return
			default:
			}
		}
	}()
	done := make(chan bool, 1)
	// Light goroutine: occasionally needs the mutex for an instant.
	go func() {
		for i := 0; i < 10; i++ {
			time.Sleep(100 * time.Microsecond)
			mu.Lock()
			mu.Unlock()
		}
		done <- true
	}()
	select {
	case <-done:
	case <-time.After(10 * time.Second):
		t.Fatalf("can't acquire Mutex in 10 seconds")
	}
}
// BenchmarkMutexUncontended measures Lock/Unlock cost with one mutex per
// goroutine. Padding keeps each mutex on its own cache line so the numbers
// are not distorted by false sharing.
func BenchmarkMutexUncontended(b *testing.B) {
	type PaddedMutex struct {
		Mutex
		pad [128]uint8 // cache-line padding to avoid false sharing
	}
	b.RunParallel(func(pb *testing.PB) {
		var mu PaddedMutex
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
		}
	})
}
// benchmarkMutex is the shared driver for the contended mutex benchmarks.
// slack oversubscribes goroutines relative to procs; work adds local
// computation between critical sections.
func benchmarkMutex(b *testing.B, slack, work bool) {
	var mu Mutex
	if slack {
		// More goroutines than procs, to model blocked waiters.
		b.SetParallelism(10)
	}
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
			if work {
				// Local work between lock operations.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
		}
		_ = foo // keep the work from being optimized away
	})
}
266 func BenchmarkMutex(b
*testing
.B
) {
267 benchmarkMutex(b
, false, false)
270 func BenchmarkMutexSlack(b
*testing
.B
) {
271 benchmarkMutex(b
, true, false)
274 func BenchmarkMutexWork(b
*testing
.B
) {
275 benchmarkMutex(b
, false, true)
278 func BenchmarkMutexWorkSlack(b
*testing
.B
) {
279 benchmarkMutex(b
, true, true)
282 func BenchmarkMutexNoSpin(b
*testing
.B
) {
283 // This benchmark models a situation where spinning in the mutex should be
284 // non-profitable and allows to confirm that spinning does not do harm.
285 // To achieve this we create excess of goroutines most of which do local work.
286 // These goroutines yield during local work, so that switching from
287 // a blocked goroutine to other goroutines is profitable.
288 // As a matter of fact, this benchmark still triggers some spinning in the mutex.
290 var acc0
, acc1
uint64
292 b
.RunParallel(func(pb
*testing
.PB
) {
294 var data
[4 << 10]uint64
295 for i
:= 0; pb
.Next(); i
++ {
302 for i
:= 0; i
< len(data
); i
+= 4 {
305 // Elaborate way to say runtime.Gosched
306 // that does not put the goroutine onto global runq.
func BenchmarkMutexSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should be
	// profitable. To achieve this we create a goroutine per-proc.
	// These goroutines access considerable amount of local data so that
	// unnecessary rescheduling is penalized by cache misses.
	var m Mutex
	var acc0, acc1 uint64
	b.RunParallel(func(pb *testing.PB) {
		var data [16 << 10]uint64
		for i := 0; pb.Next(); i++ {
			m.Lock()
			acc0 -= 100
			acc1 += 100
			m.Unlock()
			for i := 0; i < len(data); i += 4 {
				data[i]++
			}
		}
	})
}