// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Large data benchmark.
// The JSON data is a summary of agl's changes in the
// go, webkit, and chromium open source projects.
// We benchmark converting between the JSON form
// and in-memory data structures.
27 type codeResponse
struct {
28 Tree
*codeNode
`json:"tree"`
29 Username
string `json:"username"`
// codeNode is one node of the change tree in testdata/code.json.
// Kids makes the type recursive; the remaining fields carry
// per-node change statistics from the source data.
type codeNode struct {
	Name     string      `json:"name"`
	Kids     []*codeNode `json:"kids"`
	CLWeight float64     `json:"cl_weight"`
	Touches  int         `json:"touches"`
	MinT     int64       `json:"min_t"`
	MaxT     int64       `json:"max_t"`
	MeanT    int64       `json:"mean_t"`
}
43 var codeStruct codeResponse
46 f
, err
:= os
.Open("testdata/code.json.gz")
51 gz
, err
:= gzip
.NewReader(f
)
55 data
, err
:= io
.ReadAll(gz
)
62 if err
:= Unmarshal(codeJSON
, &codeStruct
); err
!= nil {
63 panic("unmarshal code.json: " + err
.Error())
66 if data
, err
= Marshal(&codeStruct
); err
!= nil {
67 panic("marshal code.json: " + err
.Error())
70 if !bytes
.Equal(data
, codeJSON
) {
71 println("different lengths", len(data
), len(codeJSON
))
72 for i
:= 0; i
< len(data
) && i
< len(codeJSON
); i
++ {
73 if data
[i
] != codeJSON
[i
] {
74 println("re-marshal: changed at byte", i
)
75 println("orig: ", string(codeJSON
[i
-10:i
+10]))
76 println("new: ", string(data
[i
-10:i
+10]))
80 panic("re-marshal code.json: different result")
84 func BenchmarkCodeEncoder(b
*testing
.B
) {
91 b
.RunParallel(func(pb
*testing
.PB
) {
92 enc
:= NewEncoder(io
.Discard
)
94 if err
:= enc
.Encode(&codeStruct
); err
!= nil {
95 b
.Fatal("Encode:", err
)
99 b
.SetBytes(int64(len(codeJSON
)))
102 func BenchmarkCodeMarshal(b
*testing
.B
) {
109 b
.RunParallel(func(pb
*testing
.PB
) {
111 if _
, err
:= Marshal(&codeStruct
); err
!= nil {
112 b
.Fatal("Marshal:", err
)
116 b
.SetBytes(int64(len(codeJSON
)))
119 func benchMarshalBytes(n
int) func(*testing
.B
) {
120 sample
:= []byte("hello world")
121 // Use a struct pointer, to avoid an allocation when passing it as an
122 // interface parameter to Marshal.
126 bytes
.Repeat(sample
, (n
/len(sample
))+1)[:n
],
128 return func(b
*testing
.B
) {
129 for i
:= 0; i
< b
.N
; i
++ {
130 if _
, err
:= Marshal(v
); err
!= nil {
131 b
.Fatal("Marshal:", err
)
137 func BenchmarkMarshalBytes(b
*testing
.B
) {
139 // 32 fits within encodeState.scratch.
140 b
.Run("32", benchMarshalBytes(32))
141 // 256 doesn't fit in encodeState.scratch, but is small enough to
142 // allocate and avoid the slower base64.NewEncoder.
143 b
.Run("256", benchMarshalBytes(256))
144 // 4096 is large enough that we want to avoid allocating for it.
145 b
.Run("4096", benchMarshalBytes(4096))
148 func BenchmarkCodeDecoder(b
*testing
.B
) {
155 b
.RunParallel(func(pb
*testing
.PB
) {
157 dec
:= NewDecoder(&buf
)
165 if err
:= dec
.Decode(&r
); err
!= nil {
166 b
.Fatal("Decode:", err
)
170 b
.SetBytes(int64(len(codeJSON
)))
173 func BenchmarkUnicodeDecoder(b
*testing
.B
) {
175 j
:= []byte(`"\uD83D\uDE01"`)
176 b
.SetBytes(int64(len(j
)))
177 r
:= bytes
.NewReader(j
)
181 for i
:= 0; i
< b
.N
; i
++ {
182 if err
:= dec
.Decode(&out
); err
!= nil {
183 b
.Fatal("Decode:", err
)
189 func BenchmarkDecoderStream(b
*testing
.B
) {
193 dec
:= NewDecoder(&buf
)
194 buf
.WriteString(`"` + strings
.Repeat("x", 1000000) + `"` + "\n\n\n")
196 if err
:= dec
.Decode(&x
); err
!= nil {
197 b
.Fatal("Decode:", err
)
199 ones
:= strings
.Repeat(" 1\n", 300000) + "\n\n\n"
201 for i
:= 0; i
< b
.N
; i
++ {
203 buf
.WriteString(ones
)
206 if err
:= dec
.Decode(&x
); err
!= nil || x
!= 1.0 {
207 b
.Fatalf("Decode: %v after %d", err
, i
)
212 func BenchmarkCodeUnmarshal(b
*testing
.B
) {
219 b
.RunParallel(func(pb
*testing
.PB
) {
222 if err
:= Unmarshal(codeJSON
, &r
); err
!= nil {
223 b
.Fatal("Unmarshal:", err
)
227 b
.SetBytes(int64(len(codeJSON
)))
230 func BenchmarkCodeUnmarshalReuse(b
*testing
.B
) {
237 b
.RunParallel(func(pb
*testing
.PB
) {
240 if err
:= Unmarshal(codeJSON
, &r
); err
!= nil {
241 b
.Fatal("Unmarshal:", err
)
245 b
.SetBytes(int64(len(codeJSON
)))
248 func BenchmarkUnmarshalString(b
*testing
.B
) {
250 data
:= []byte(`"hello, world"`)
251 b
.RunParallel(func(pb
*testing
.PB
) {
254 if err
:= Unmarshal(data
, &s
); err
!= nil {
255 b
.Fatal("Unmarshal:", err
)
261 func BenchmarkUnmarshalFloat64(b
*testing
.B
) {
263 data
:= []byte(`3.14`)
264 b
.RunParallel(func(pb
*testing
.PB
) {
267 if err
:= Unmarshal(data
, &f
); err
!= nil {
268 b
.Fatal("Unmarshal:", err
)
274 func BenchmarkUnmarshalInt64(b
*testing
.B
) {
277 b
.RunParallel(func(pb
*testing
.PB
) {
280 if err
:= Unmarshal(data
, &x
); err
!= nil {
281 b
.Fatal("Unmarshal:", err
)
287 func BenchmarkIssue10335(b
*testing
.B
) {
289 j
:= []byte(`{"a":{ }}`)
290 b
.RunParallel(func(pb
*testing
.PB
) {
293 if err
:= Unmarshal(j
, &s
); err
!= nil {
300 func BenchmarkIssue34127(b
*testing
.B
) {
303 Bar
string `json:"bar,string"`
307 b
.RunParallel(func(pb
*testing
.PB
) {
309 if _
, err
:= Marshal(&j
); err
!= nil {
316 func BenchmarkUnmapped(b
*testing
.B
) {
318 j
:= []byte(`{"s": "hello", "y": 2, "o": {"x": 0}, "a": [1, 99, {"x": 1}]}`)
319 b
.RunParallel(func(pb
*testing
.PB
) {
322 if err
:= Unmarshal(j
, &s
); err
!= nil {
329 func BenchmarkTypeFieldsCache(b
*testing
.B
) {
331 var maxTypes
int = 1e6
332 if testenv
.Builder() != "" {
333 maxTypes
= 1e3
// restrict cache sizes on builders
336 // Dynamically generate many new types.
337 types
:= make([]reflect
.Type
, maxTypes
)
338 fs
:= []reflect
.StructField
{{
339 Type
: reflect
.TypeOf(""),
342 for i
:= range types
{
343 fs
[0].Name
= fmt
.Sprintf("TypeFieldsCache%d", i
)
344 types
[i
] = reflect
.StructOf(fs
)
347 // clearClear clears the cache. Other JSON operations, must not be running.
348 clearCache
:= func() {
349 fieldCache
= sync
.Map
{}
352 // MissTypes tests the performance of repeated cache misses.
353 // This measures the time to rebuild a cache of size nt.
354 for nt
:= 1; nt
<= maxTypes
; nt
*= 10 {
356 b
.Run(fmt
.Sprintf("MissTypes%d", nt
), func(b
*testing
.B
) {
357 nc
:= runtime
.GOMAXPROCS(0)
358 for i
:= 0; i
< b
.N
; i
++ {
360 var wg sync
.WaitGroup
361 for j
:= 0; j
< nc
; j
++ {
364 for _
, t
:= range ts
[(j
*len(ts
))/nc
: ((j
+1)*len(ts
))/nc
] {
375 // HitTypes tests the performance of repeated cache hits.
376 // This measures the average time of each cache lookup.
377 for nt
:= 1; nt
<= maxTypes
; nt
*= 10 {
378 // Pre-warm a cache of size nt.
380 for _
, t
:= range types
[:nt
] {
383 b
.Run(fmt
.Sprintf("HitTypes%d", nt
), func(b
*testing
.B
) {
384 b
.RunParallel(func(pb
*testing
.PB
) {
386 cachedTypeFields(types
[0])
393 func BenchmarkEncodeMarshaler(b
*testing
.B
) {
401 b
.RunParallel(func(pb
*testing
.PB
) {
402 enc
:= NewEncoder(io
.Discard
)
405 if err
:= enc
.Encode(&m
); err
!= nil {
406 b
.Fatal("Encode:", err
)