2017-03-02 Richard Biener <rguenther@suse.de>
[official-gcc.git] / libgo / go / sync / mutex_test.go
blob88dbccf3add4226fda5b8c6cd77109ef060c5174
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // GOMAXPROCS=10 go test
7 package sync_test
9 import (
10 "fmt"
11 "internal/testenv"
12 "os"
13 "os/exec"
14 "runtime"
15 "strings"
16 . "sync"
17 "testing"
20 func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
21 for i := 0; i < loops; i++ {
22 Runtime_Semacquire(s)
23 Runtime_Semrelease(s)
25 cdone <- true
28 func TestSemaphore(t *testing.T) {
29 s := new(uint32)
30 *s = 1
31 c := make(chan bool)
32 for i := 0; i < 10; i++ {
33 go HammerSemaphore(s, 1000, c)
35 for i := 0; i < 10; i++ {
36 <-c
40 func BenchmarkUncontendedSemaphore(b *testing.B) {
41 s := new(uint32)
42 *s = 1
43 HammerSemaphore(s, b.N, make(chan bool, 2))
46 func BenchmarkContendedSemaphore(b *testing.B) {
47 b.StopTimer()
48 s := new(uint32)
49 *s = 1
50 c := make(chan bool)
51 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
52 b.StartTimer()
54 go HammerSemaphore(s, b.N/2, c)
55 go HammerSemaphore(s, b.N/2, c)
56 <-c
57 <-c
60 func HammerMutex(m *Mutex, loops int, cdone chan bool) {
61 for i := 0; i < loops; i++ {
62 m.Lock()
63 m.Unlock()
65 cdone <- true
68 func TestMutex(t *testing.T) {
69 if n := runtime.SetMutexProfileFraction(1); n != 0 {
70 t.Logf("got mutexrate %d expected 0", n)
72 defer runtime.SetMutexProfileFraction(0)
73 m := new(Mutex)
74 c := make(chan bool)
75 for i := 0; i < 10; i++ {
76 go HammerMutex(m, 1000, c)
78 for i := 0; i < 10; i++ {
79 <-c
// misuseTests enumerates lock-misuse scenarios. Each f is expected to
// fault with a message about an unlocked lock when executed in a
// subprocess (see init and TestMutexMisuse).
var misuseTests = []struct {
	name string
	f    func()
}{
	{
		"Mutex.Unlock",
		func() {
			var mu Mutex
			mu.Unlock()
		},
	},
	{
		"Mutex.Unlock2",
		func() {
			var mu Mutex
			mu.Lock()
			mu.Unlock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock",
		func() {
			var mu RWMutex
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock2",
		func() {
			var mu RWMutex
			mu.RLock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock3",
		func() {
			var mu RWMutex
			mu.Lock()
			mu.Unlock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.RUnlock",
		func() {
			var mu RWMutex
			mu.RUnlock()
		},
	},
	{
		"RWMutex.RUnlock2",
		func() {
			var mu RWMutex
			mu.Lock()
			mu.RUnlock()
		},
	},
	{
		"RWMutex.RUnlock3",
		func() {
			var mu RWMutex
			mu.RLock()
			mu.RUnlock()
			mu.RUnlock()
		},
	},
}
153 func init() {
154 if len(os.Args) == 3 && os.Args[1] == "TESTMISUSE" {
155 for _, test := range misuseTests {
156 if test.name == os.Args[2] {
157 test.f()
158 fmt.Printf("test completed\n")
159 os.Exit(0)
162 fmt.Printf("unknown test\n")
163 os.Exit(0)
167 func TestMutexMisuse(t *testing.T) {
168 testenv.MustHaveExec(t)
169 for _, test := range misuseTests {
170 out, err := exec.Command(os.Args[0], "TESTMISUSE", test.name).CombinedOutput()
171 if err == nil || !strings.Contains(string(out), "unlocked") {
172 t.Errorf("%s: did not find failure with message about unlocked lock: %s\n%s\n", test.name, err, out)
// BenchmarkMutexUncontended measures Lock/Unlock on per-goroutine
// mutexes; the pad keeps each mutex on its own cache line to avoid
// false sharing between goroutines.
func BenchmarkMutexUncontended(b *testing.B) {
	type PaddedMutex struct {
		Mutex
		pad [128]uint8
	}
	b.RunParallel(func(pb *testing.PB) {
		var lock PaddedMutex
		for pb.Next() {
			lock.Lock()
			lock.Unlock()
		}
	})
}
// benchmarkMutex is the shared driver for the Mutex benchmarks.
// slack oversubscribes goroutines 10x relative to GOMAXPROCS; work
// adds a chunk of local computation after each critical section.
func benchmarkMutex(b *testing.B, slack, work bool) {
	var mu Mutex
	if slack {
		b.SetParallelism(10)
	}
	b.RunParallel(func(pb *testing.PB) {
		acc := 0
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
			if work {
				for k := 0; k < 100; k++ {
					acc *= 2
					acc /= 2
				}
			}
		}
		_ = acc
	})
}
212 func BenchmarkMutex(b *testing.B) {
213 benchmarkMutex(b, false, false)
216 func BenchmarkMutexSlack(b *testing.B) {
217 benchmarkMutex(b, true, false)
220 func BenchmarkMutexWork(b *testing.B) {
221 benchmarkMutex(b, false, true)
224 func BenchmarkMutexWorkSlack(b *testing.B) {
225 benchmarkMutex(b, true, true)
// BenchmarkMutexNoSpin models a situation where spinning in the mutex
// should be non-profitable and allows to confirm that spinning does
// not do harm. To achieve this we create an excess of goroutines most
// of which do local work. These goroutines yield during local work, so
// that switching from a blocked goroutine to other goroutines is
// profitable. As a matter of fact, this benchmark still triggers some
// spinning in the mutex.
func BenchmarkMutexNoSpin(b *testing.B) {
	var m Mutex
	var acc0, acc1 uint64
	b.SetParallelism(4)
	b.RunParallel(func(pb *testing.PB) {
		c := make(chan bool)
		var data [4 << 10]uint64
		for i := 0; pb.Next(); i++ {
			if i%4 == 0 {
				m.Lock()
				acc0 -= 100
				acc1 += 100
				m.Unlock()
			} else {
				for j := 0; j < len(data); j += 4 {
					data[j]++
				}
				// Elaborate way to say runtime.Gosched
				// that does not put the goroutine onto global runq.
				go func() {
					c <- true
				}()
				<-c
			}
		}
	})
}
262 func BenchmarkMutexSpin(b *testing.B) {
263 // This benchmark models a situation where spinning in the mutex should be
264 // profitable. To achieve this we create a goroutine per-proc.
265 // These goroutines access considerable amount of local data so that
266 // unnecessary rescheduling is penalized by cache misses.
267 var m Mutex
268 var acc0, acc1 uint64
269 b.RunParallel(func(pb *testing.PB) {
270 var data [16 << 10]uint64
271 for i := 0; pb.Next(); i++ {
272 m.Lock()
273 acc0 -= 100
274 acc1 += 100
275 m.Unlock()
276 for i := 0; i < len(data); i += 4 {
277 data[i]++