Revert "Set num_threads to 50 on 32-bit hppa in two libgomp loop tests"
libgo/go/sync/pool_test.go
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Pool is a no-op under the race detector, so none of these tests work there.
//go:build !race

package sync_test
import (
	"runtime"
	"runtime/debug"
	"sort"
	. "sync"
	"sync/atomic"
	"testing"
	"time"
)
func TestPool(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p Pool
	if p.Get() != nil {
		t.Fatal("expected empty")
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put("a")
	p.Put("b")
	if g := p.Get(); g != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	if g := p.Get(); g != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	Runtime_procUnpin()

	// Put in a large number of objects so they spill into
	// stealable space.
	for i := 0; i < 100; i++ {
		p.Put("c")
	}
	// After one GC, the victim cache should keep them alive.
	runtime.GC()
	if g := p.Get(); g != "c" {
		t.Fatalf("got %#v; want c after GC", g)
	}
	// A second GC should drop the victim cache.
	runtime.GC()
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil after second GC", g)
	}
}
func TestPoolNew(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	i := 0
	p := Pool{
		New: func() any {
			i++
			return i
		},
	}
	if v := p.Get(); v != 1 {
		t.Fatalf("got %v; want 1", v)
	}
	if v := p.Get(); v != 2 {
		t.Fatalf("got %v; want 2", v)
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put(42)
	if v := p.Get(); v != 42 {
		t.Fatalf("got %v; want 42", v)
	}
	Runtime_procUnpin()

	if v := p.Get(); v != 3 {
		t.Fatalf("got %v; want 3", v)
	}
}
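
// A minimal sketch of the Get/Put pattern the tests above exercise: a Pool
// whose New callback constructs reusable buffers. The byte-slice payload and
// the TestPoolUsageSketch name are illustrative only, not part of the sync
// API under test.
func TestPoolUsageSketch(t *testing.T) {
	var p Pool
	p.New = func() any { return make([]byte, 0, 64) }

	buf := p.Get().([]byte) // either a reused slice or a fresh one from New
	buf = append(buf[:0], "hello"...)
	p.Put(buf) // hand the buffer back so later callers can reuse it

	// With New set, Get never returns nil: it either reuses a pooled value
	// or constructs a new one.
	if p.Get() == nil {
		t.Fatal("Get returned nil from a Pool with New set")
	}
}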
// Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testing.T) {
	testPool(t, true)
}

// Test that Pool releases resources on GC.
func TestPoolRelease(t *testing.T) {
	testPool(t, false)
}

func testPool(t *testing.T, drain bool) {
	t.Skip("gccgo imprecise GC breaks this test")
	var p Pool
	const N = 100
loop:
	for try := 0; try < 3; try++ {
		if try == 1 && testing.Short() {
			break
		}
		var fin, fin1 uint32
		for i := 0; i < N; i++ {
			v := new(string)
			runtime.SetFinalizer(v, func(vv *string) {
				atomic.AddUint32(&fin, 1)
			})
			p.Put(v)
		}
		if drain {
			for i := 0; i < N; i++ {
				p.Get()
			}
		}
		for i := 0; i < 5; i++ {
			runtime.GC()
			time.Sleep(time.Duration(i*100+10) * time.Millisecond)
			// 1 pointer can remain on stack or elsewhere
			if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
				continue loop
			}
		}
		t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
	}
}
func TestPoolStress(t *testing.T) {
	const P = 10
	N := int(1e6)
	if testing.Short() {
		N /= 100
	}
	var p Pool
	done := make(chan bool)
	for i := 0; i < P; i++ {
		go func() {
			var v any = 0
			for j := 0; j < N; j++ {
				if v == nil {
					v = 0
				}
				p.Put(v)
				v = p.Get()
				if v != nil && v.(int) != 0 {
					t.Errorf("expect 0, got %v", v)
					break
				}
			}
			done <- true
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
}
func TestPoolDequeue(t *testing.T) {
	testPoolDequeue(t, NewPoolDequeue(16))
}

func TestPoolChain(t *testing.T) {
	testPoolDequeue(t, NewPoolChain())
}

func testPoolDequeue(t *testing.T, d PoolDequeue) {
	const P = 10
	var N int = 2e6
	if testing.Short() {
		N = 1e3
	}
	have := make([]int32, N)
	var stop int32
	var wg WaitGroup
	record := func(val int) {
		atomic.AddInt32(&have[val], 1)
		if val == N-1 {
			atomic.StoreInt32(&stop, 1)
		}
	}

	// Start P-1 consumers.
	for i := 1; i < P; i++ {
		wg.Add(1)
		go func() {
			fail := 0
			for atomic.LoadInt32(&stop) == 0 {
				val, ok := d.PopTail()
				if ok {
					fail = 0
					record(val.(int))
				} else {
					// Speed up the test by
					// allowing the pusher to run.
					if fail++; fail%100 == 0 {
						runtime.Gosched()
					}
				}
			}
			wg.Done()
		}()
	}

	// Start 1 producer.
	nPopHead := 0
	wg.Add(1)
	go func() {
		for j := 0; j < N; j++ {
			for !d.PushHead(j) {
				// Allow a popper to run.
				runtime.Gosched()
			}
			if j%10 == 0 {
				val, ok := d.PopHead()
				if ok {
					nPopHead++
					record(val.(int))
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()

	// Check results.
	for i, count := range have {
		if count != 1 {
			t.Errorf("expected have[%d] = 1, got %d", i, count)
		}
	}
	// Check that at least some PopHeads succeeded. We skip this
	// check in short mode because it's common enough that the
	// queue will stay nearly empty all the time and a PopTail
	// will happen during the window between every PushHead and
	// PopHead.
	if !testing.Short() && nPopHead == 0 {
		t.Errorf("popHead never succeeded")
	}
}
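
// A minimal sketch of the dequeue contract the stress test above exercises:
// the single producer pushes and pops at the head, while consumers pop from
// the tail and receive the oldest element. The capacity of 4 is arbitrary
// (the ring buffer expects a power of two), and the TestPoolDequeueSketch
// name is illustrative only.
func TestPoolDequeueSketch(t *testing.T) {
	d := NewPoolDequeue(4)
	if !d.PushHead(1) || !d.PushHead(2) {
		t.Fatal("expected pushes onto an empty dequeue to succeed")
	}
	// PopTail yields the oldest pushed value.
	if v, ok := d.PopTail(); !ok || v.(int) != 1 {
		t.Fatalf("PopTail: got %v, %v; want 1, true", v, ok)
	}
	// PopHead yields the most recently pushed value.
	if v, ok := d.PopHead(); !ok || v.(int) != 2 {
		t.Fatalf("PopHead: got %v, %v; want 2, true", v, ok)
	}
}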
func BenchmarkPool(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			p.Put(1)
			p.Get()
		}
	})
}

func BenchmarkPoolOverflow(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < 100; b++ {
				p.Put(1)
			}
			for b := 0; b < 100; b++ {
				p.Get()
			}
		}
	})
}
// Simulate object starvation in order to force Ps to steal objects
// from other Ps.
func BenchmarkPoolStarvation(b *testing.B) {
	var p Pool
	count := 100
	// Put 33% fewer objects than we get back. The resulting starvation
	// forces P-local storage to steal objects from other Ps.
	countStarved := count - int(float32(count)*0.33)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < countStarved; b++ {
				p.Put(1)
			}
			for b := 0; b < count; b++ {
				p.Get()
			}
		}
	})
}
var globalSink any

func BenchmarkPoolSTW(b *testing.B) {
	// Take control of GC.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var mstats runtime.MemStats
	var pauses []uint64

	var p Pool
	for i := 0; i < b.N; i++ {
		// Put a large number of items into a pool.
		const N = 100000
		var item any = 42
		for i := 0; i < N; i++ {
			p.Put(item)
		}
		// Do a GC.
		runtime.GC()
		// Record pause time.
		runtime.ReadMemStats(&mstats)
		pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
	}

	// Get pause time stats.
	sort.Slice(pauses, func(i, j int) bool { return pauses[i] < pauses[j] })
	var total uint64
	for _, ns := range pauses {
		total += ns
	}
	// ns/op for this benchmark is average STW time.
	b.ReportMetric(float64(total)/float64(b.N), "ns/op")
	b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
	b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
}
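
// The percentile metrics above index the sorted pause slice directly:
// pauses[len(pauses)*95/100] is the entry 95% of the way through the sorted
// samples, rounding down (e.g. index 95 when there are 100 samples). A
// hedged equivalent written out as a helper; percentileNs is illustrative
// and not used by the benchmarks.
func percentileNs(sortedPauses []uint64, pct int) float64 {
	// Assumes sortedPauses is non-empty and sorted in ascending order.
	return float64(sortedPauses[len(sortedPauses)*pct/100])
}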
func BenchmarkPoolExpensiveNew(b *testing.B) {
	// Populate a pool with items that are expensive to construct
	// to stress pool cleanup and subsequent reconstruction.

	// Create a ballast so the GC has a non-zero heap size and
	// runs at reasonable times.
	globalSink = make([]byte, 8<<20)
	defer func() { globalSink = nil }()

	// Create a pool that's "expensive" to fill.
	var p Pool
	var nNew uint64
	p.New = func() any {
		atomic.AddUint64(&nNew, 1)
		time.Sleep(time.Millisecond)
		return 42
	}
	var mstats1, mstats2 runtime.MemStats
	runtime.ReadMemStats(&mstats1)
	b.RunParallel(func(pb *testing.PB) {
		// Simulate 100X the number of goroutines having items
		// checked out from the Pool simultaneously.
		items := make([]any, 100)
		var sink []byte
		for pb.Next() {
			// Stress the pool.
			for i := range items {
				items[i] = p.Get()
				// Simulate doing some work with this
				// item checked out.
				sink = make([]byte, 32<<10)
			}
			for i, v := range items {
				p.Put(v)
				items[i] = nil
			}
		}
		_ = sink
	})
	runtime.ReadMemStats(&mstats2)

	b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
	b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
}