libgo/go/sync/map_bench_test.go
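This file is the sync.Map benchmark suite as vendored into libgo from the Go standard library; it compares sync.Map against two reference map implementations. For orientation, here is a minimal usage sketch of the interface{}-based sync.Map API that these benchmarks exercise. This example is an illustration added here, not part of the file:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		var m sync.Map
		m.Store("a", 1) // insert or overwrite a key
		if v, ok := m.Load("a"); ok {
			fmt.Println(v) // 1
		}
		// LoadOrStore returns the existing value if the key is present;
		// otherwise it stores and returns the given value.
		actual, loaded := m.LoadOrStore("b", 2)
		fmt.Println(actual, loaded) // 2 false
		m.Range(func(k, v interface{}) bool {
			fmt.Println(k, v) // iteration stops if the callback returns false
			return true
		})
		m.Delete("a")
	}

The benchmarked file follows.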
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync_test

import (
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
	"testing"
)

type bench struct {
	setup func(*testing.B, mapInterface)
	perG  func(b *testing.B, pb *testing.PB, i int, m mapInterface)
}

func benchMap(b *testing.B, bench bench) {
	for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &sync.Map{}} {
		b.Run(fmt.Sprintf("%T", m), func(b *testing.B) {
			// Allocate a fresh instance of the map type so each run starts empty.
			m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface)
			if bench.setup != nil {
				bench.setup(b, m)
			}

			b.ResetTimer()

			var i int64
			b.RunParallel(func(pb *testing.PB) {
				// Give each goroutine a disjoint key range starting at id*b.N.
				id := int(atomic.AddInt64(&i, 1) - 1)
				bench.perG(b, pb, id*b.N, m)
			})
		})
	}
}

func BenchmarkLoadMostlyHits(b *testing.B) {
	const hits, misses = 1023, 1

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < hits; i++ {
				m.LoadOrStore(i, i)
			}
			// Prime the map to get it into a steady state.
			for i := 0; i < hits*2; i++ {
				m.Load(i % hits)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Load(i % (hits + misses))
			}
		},
	})
}

func BenchmarkLoadMostlyMisses(b *testing.B) {
	const hits, misses = 1, 1023

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < hits; i++ {
				m.LoadOrStore(i, i)
			}
			// Prime the map to get it into a steady state.
			for i := 0; i < hits*2; i++ {
				m.Load(i % hits)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Load(i % (hits + misses))
			}
		},
	})
}

func BenchmarkLoadOrStoreBalanced(b *testing.B) {
	const hits, misses = 128, 128

	benchMap(b, bench{
		setup: func(b *testing.B, m mapInterface) {
			if _, ok := m.(*DeepCopyMap); ok {
				b.Skip("DeepCopyMap has quadratic running time.")
			}
			for i := 0; i < hits; i++ {
				m.LoadOrStore(i, i)
			}
			// Prime the map to get it into a steady state.
			for i := 0; i < hits*2; i++ {
				m.Load(i % hits)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				j := i % (hits + misses)
				if j < hits {
					if _, ok := m.LoadOrStore(j, i); !ok {
						b.Fatalf("unexpected miss for %v", j)
					}
				} else {
					if v, loaded := m.LoadOrStore(i, i); loaded {
						b.Fatalf("failed to store %v: existing value %v", i, v)
					}
				}
			}
		},
	})
}

func BenchmarkLoadOrStoreUnique(b *testing.B) {
	benchMap(b, bench{
		setup: func(b *testing.B, m mapInterface) {
			if _, ok := m.(*DeepCopyMap); ok {
				b.Skip("DeepCopyMap has quadratic running time.")
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.LoadOrStore(i, i)
			}
		},
	})
}

func BenchmarkLoadOrStoreCollision(b *testing.B) {
	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			m.LoadOrStore(0, 0)
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.LoadOrStore(0, 0)
			}
		},
	})
}

func BenchmarkRange(b *testing.B) {
	const mapSize = 1 << 10

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < mapSize; i++ {
				m.Store(i, i)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Range(func(_, _ interface{}) bool { return true })
			}
		},
	})
}

// BenchmarkAdversarialAlloc tests performance when we store a new value
// immediately whenever the map is promoted to clean and otherwise load a
// unique, missing key.
//
// This forces the Load calls to always acquire the map's mutex.
func BenchmarkAdversarialAlloc(b *testing.B) {
	benchMap(b, bench{
		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			var stores, loadsSinceStore int64
			for ; pb.Next(); i++ {
				m.Load(i)
				if loadsSinceStore++; loadsSinceStore > stores {
					m.LoadOrStore(i, stores)
					loadsSinceStore = 0
					stores++
				}
			}
		},
	})
}

// BenchmarkAdversarialDelete tests performance when we periodically delete
// one key and add a different one in a large map.
//
// This forces the Load calls to always acquire the map's mutex and periodically
// makes a full copy of the map despite changing only one entry.
func BenchmarkAdversarialDelete(b *testing.B) {
	const mapSize = 1 << 10

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < mapSize; i++ {
				m.Store(i, i)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Load(i)

				if i%mapSize == 0 {
					m.Range(func(k, _ interface{}) bool {
						m.Delete(k)
						return false
					})
					m.Store(i, i)
				}
			}
		},
	})
}
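
The mapInterface, RWMutexMap, and DeepCopyMap types used above are defined in the sibling file map_reference_test.go, not here: mapInterface is the common interface that sync.Map and both reference maps implement, RWMutexMap is a plain map guarded by a sync.RWMutex, and DeepCopyMap copies the entire map on every write, which is why two benchmarks above skip it as quadratic. A sketch of the interface, paraphrased from the Go 1.9-era source (treat the exact signatures as approximate):

	// mapInterface is the interface Map implements (sketch; see
	// map_reference_test.go for the authoritative definition).
	type mapInterface interface {
		Load(interface{}) (interface{}, bool)
		Store(key, value interface{})
		LoadOrStore(key, value interface{}) (actual interface{}, loaded bool)
		Delete(interface{})
		Range(func(key, value interface{}) (shouldContinue bool))
	}

With those definitions in scope, the suite can typically be run with go test -run=NONE -bench=. in the sync package; each benchmark reports one result per map implementation thanks to the b.Run(fmt.Sprintf("%T", m), ...) loop in benchMap.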