// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
16 setup
func(*testing
.B
, mapInterface
)
17 perG
func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
)
20 func benchMap(b
*testing
.B
, bench bench
) {
21 for _
, m
:= range [...]mapInterface
{&DeepCopyMap
{}, &RWMutexMap
{}, &sync
.Map
{}} {
22 b
.Run(fmt
.Sprintf("%T", m
), func(b
*testing
.B
) {
23 m
= reflect
.New(reflect
.TypeOf(m
).Elem()).Interface().(mapInterface
)
24 if bench
.setup
!= nil {
31 b
.RunParallel(func(pb
*testing
.PB
) {
32 id
:= int(atomic
.AddInt64(&i
, 1) - 1)
33 bench
.perG(b
, pb
, id
*b
.N
, m
)
39 func BenchmarkLoadMostlyHits(b
*testing
.B
) {
40 const hits
, misses
= 1023, 1
43 setup
: func(_
*testing
.B
, m mapInterface
) {
44 for i
:= 0; i
< hits
; i
++ {
47 // Prime the map to get it into a steady state.
48 for i
:= 0; i
< hits
*2; i
++ {
53 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
54 for ; pb
.Next(); i
++ {
55 m
.Load(i
% (hits
+ misses
))
61 func BenchmarkLoadMostlyMisses(b
*testing
.B
) {
62 const hits
, misses
= 1, 1023
65 setup
: func(_
*testing
.B
, m mapInterface
) {
66 for i
:= 0; i
< hits
; i
++ {
69 // Prime the map to get it into a steady state.
70 for i
:= 0; i
< hits
*2; i
++ {
75 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
76 for ; pb
.Next(); i
++ {
77 m
.Load(i
% (hits
+ misses
))
83 func BenchmarkLoadOrStoreBalanced(b
*testing
.B
) {
84 const hits
, misses
= 128, 128
87 setup
: func(b
*testing
.B
, m mapInterface
) {
88 if _
, ok
:= m
.(*DeepCopyMap
); ok
{
89 b
.Skip("DeepCopyMap has quadratic running time.")
91 for i
:= 0; i
< hits
; i
++ {
94 // Prime the map to get it into a steady state.
95 for i
:= 0; i
< hits
*2; i
++ {
100 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
101 for ; pb
.Next(); i
++ {
102 j
:= i
% (hits
+ misses
)
104 if _
, ok
:= m
.LoadOrStore(j
, i
); !ok
{
105 b
.Fatalf("unexpected miss for %v", j
)
108 if v
, loaded
:= m
.LoadOrStore(i
, i
); loaded
{
109 b
.Fatalf("failed to store %v: existing value %v", i
, v
)
117 func BenchmarkLoadOrStoreUnique(b
*testing
.B
) {
119 setup
: func(b
*testing
.B
, m mapInterface
) {
120 if _
, ok
:= m
.(*DeepCopyMap
); ok
{
121 b
.Skip("DeepCopyMap has quadratic running time.")
125 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
126 for ; pb
.Next(); i
++ {
133 func BenchmarkLoadOrStoreCollision(b
*testing
.B
) {
135 setup
: func(_
*testing
.B
, m mapInterface
) {
139 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
140 for ; pb
.Next(); i
++ {
147 func BenchmarkLoadAndDeleteBalanced(b
*testing
.B
) {
148 const hits
, misses
= 128, 128
151 setup
: func(b
*testing
.B
, m mapInterface
) {
152 if _
, ok
:= m
.(*DeepCopyMap
); ok
{
153 b
.Skip("DeepCopyMap has quadratic running time.")
155 for i
:= 0; i
< hits
; i
++ {
158 // Prime the map to get it into a steady state.
159 for i
:= 0; i
< hits
*2; i
++ {
164 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
165 for ; pb
.Next(); i
++ {
166 j
:= i
% (hits
+ misses
)
177 func BenchmarkLoadAndDeleteUnique(b
*testing
.B
) {
179 setup
: func(b
*testing
.B
, m mapInterface
) {
180 if _
, ok
:= m
.(*DeepCopyMap
); ok
{
181 b
.Skip("DeepCopyMap has quadratic running time.")
185 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
186 for ; pb
.Next(); i
++ {
193 func BenchmarkLoadAndDeleteCollision(b
*testing
.B
) {
195 setup
: func(_
*testing
.B
, m mapInterface
) {
199 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
200 for ; pb
.Next(); i
++ {
207 func BenchmarkRange(b
*testing
.B
) {
208 const mapSize
= 1 << 10
211 setup
: func(_
*testing
.B
, m mapInterface
) {
212 for i
:= 0; i
< mapSize
; i
++ {
217 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
218 for ; pb
.Next(); i
++ {
219 m
.Range(func(_
, _ any
) bool { return true })
225 // BenchmarkAdversarialAlloc tests performance when we store a new value
226 // immediately whenever the map is promoted to clean and otherwise load a
227 // unique, missing key.
229 // This forces the Load calls to always acquire the map's mutex.
230 func BenchmarkAdversarialAlloc(b
*testing
.B
) {
232 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
233 var stores
, loadsSinceStore
int64
234 for ; pb
.Next(); i
++ {
236 if loadsSinceStore
++; loadsSinceStore
> stores
{
237 m
.LoadOrStore(i
, stores
)
246 // BenchmarkAdversarialDelete tests performance when we periodically delete
247 // one key and add a different one in a large map.
249 // This forces the Load calls to always acquire the map's mutex and periodically
250 // makes a full copy of the map despite changing only one entry.
251 func BenchmarkAdversarialDelete(b
*testing
.B
) {
252 const mapSize
= 1 << 10
255 setup
: func(_
*testing
.B
, m mapInterface
) {
256 for i
:= 0; i
< mapSize
; i
++ {
261 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
262 for ; pb
.Next(); i
++ {
266 m
.Range(func(k
, _ any
) bool {
277 func BenchmarkDeleteCollision(b
*testing
.B
) {
279 setup
: func(_
*testing
.B
, m mapInterface
) {
283 perG
: func(b
*testing
.B
, pb
*testing
.PB
, i
int, m mapInterface
) {
284 for ; pb
.Next(); i
++ {