// Gitweb extraction header removed: this file is
// libgo/go/runtime/slice_test.go (GCC mirror of the Go runtime tests).
1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4 package runtime_test
import (
	"fmt"
	"testing"
)
// N is the element count used by the small append benchmarks below.
const N = 20
// BenchmarkMakeSlice measures the cost of allocating a small byte slice.
// The result is assigned to a local and blanked so the allocation is kept.
func BenchmarkMakeSlice(b *testing.B) {
	var buf []byte
	for i := 0; i < b.N; i++ {
		buf = make([]byte, 32)
		_ = buf
	}
}
// Fixed-size struct types (24, 32, and 40 bytes of int64 fields) used by
// BenchmarkGrowSlice to exercise growslice on common element sizes.
// NOTE(review): the declaration group was left unterminated in this copy;
// the closing parenthesis is restored here.
type (
	struct24 struct{ a, b, c int64 }
	struct32 struct{ a, b, c, d int64 }
	struct40 struct{ a, b, c, d, e int64 }
)
27 func BenchmarkGrowSlice(b *testing.B) {
28 b.Run("Byte", func(b *testing.B) {
29 x := make([]byte, 9)
30 for i := 0; i < b.N; i++ {
31 _ = append([]byte(nil), x...)
34 b.Run("Int16", func(b *testing.B) {
35 x := make([]int16, 9)
36 for i := 0; i < b.N; i++ {
37 _ = append([]int16(nil), x...)
40 b.Run("Int", func(b *testing.B) {
41 x := make([]int, 9)
42 for i := 0; i < b.N; i++ {
43 _ = append([]int(nil), x...)
46 b.Run("Ptr", func(b *testing.B) {
47 x := make([]*byte, 9)
48 for i := 0; i < b.N; i++ {
49 _ = append([]*byte(nil), x...)
52 b.Run("Struct", func(b *testing.B) {
53 b.Run("24", func(b *testing.B) {
54 x := make([]struct24, 9)
55 for i := 0; i < b.N; i++ {
56 _ = append([]struct24(nil), x...)
59 b.Run("32", func(b *testing.B) {
60 x := make([]struct32, 9)
61 for i := 0; i < b.N; i++ {
62 _ = append([]struct32(nil), x...)
65 b.Run("40", func(b *testing.B) {
66 x := make([]struct40, 9)
67 for i := 0; i < b.N; i++ {
68 _ = append([]struct40(nil), x...)
// Benchmark sinks: storing results in package-level variables keeps the
// compiler from optimizing the benchmarked appends away.
var (
	SinkIntSlice        []int
	SinkIntPointerSlice []*int
)
80 func BenchmarkExtendSlice(b *testing.B) {
81 var length = 4 // Use a variable to prevent stack allocation of slices.
82 b.Run("IntSlice", func(b *testing.B) {
83 s := make([]int, 0, length)
84 for i := 0; i < b.N; i++ {
85 s = append(s[:0:length/2], make([]int, length)...)
87 SinkIntSlice = s
89 b.Run("PointerSlice", func(b *testing.B) {
90 s := make([]*int, 0, length)
91 for i := 0; i < b.N; i++ {
92 s = append(s[:0:length/2], make([]*int, length)...)
94 SinkIntPointerSlice = s
96 b.Run("NoGrow", func(b *testing.B) {
97 s := make([]int, 0, length)
98 for i := 0; i < b.N; i++ {
99 s = append(s[:0:length], make([]int, length)...)
101 SinkIntSlice = s
105 func BenchmarkAppend(b *testing.B) {
106 b.StopTimer()
107 x := make([]int, 0, N)
108 b.StartTimer()
109 for i := 0; i < b.N; i++ {
110 x = x[0:0]
111 for j := 0; j < N; j++ {
112 x = append(x, j)
// BenchmarkAppendGrowByte grows a byte slice from nil to 1<<20 elements one
// append at a time, measuring repeated reallocation and copying.
func BenchmarkAppendGrowByte(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x []byte
		for j := 0; j < 1<<20; j++ {
			x = append(x, byte(j))
		}
	}
}
// BenchmarkAppendGrowString is the string-element counterpart of
// BenchmarkAppendGrowByte: grow a []string from nil to 1<<20 elements.
func BenchmarkAppendGrowString(b *testing.B) {
	var s string
	for i := 0; i < b.N; i++ {
		var x []string
		for j := 0; j < 1<<20; j++ {
			x = append(x, s)
		}
	}
}
136 func BenchmarkAppendSlice(b *testing.B) {
137 for _, length := range []int{1, 4, 7, 8, 15, 16, 32} {
138 b.Run(fmt.Sprint(length, "Bytes"), func(b *testing.B) {
139 x := make([]byte, 0, N)
140 y := make([]byte, length)
141 for i := 0; i < b.N; i++ {
142 x = x[0:0]
143 x = append(x, y...)
// blackhole receives the appended bytes in BenchmarkAppendSliceLarge so the
// benchmarked work is not dead-code-eliminated.
var blackhole []byte
153 func BenchmarkAppendSliceLarge(b *testing.B) {
154 for _, length := range []int{1 << 10, 4 << 10, 16 << 10, 64 << 10, 256 << 10, 1024 << 10} {
155 y := make([]byte, length)
156 b.Run(fmt.Sprint(length, "Bytes"), func(b *testing.B) {
157 for i := 0; i < b.N; i++ {
158 blackhole = nil
159 blackhole = append(blackhole, y...)
165 func BenchmarkAppendStr(b *testing.B) {
166 for _, str := range []string{
167 "1",
168 "1234",
169 "12345678",
170 "1234567890123456",
171 "12345678901234567890123456789012",
173 b.Run(fmt.Sprint(len(str), "Bytes"), func(b *testing.B) {
174 x := make([]byte, 0, N)
175 for i := 0; i < b.N; i++ {
176 x = x[0:0]
177 x = append(x, str...)
183 func BenchmarkAppendSpecialCase(b *testing.B) {
184 b.StopTimer()
185 x := make([]int, 0, N)
186 b.StartTimer()
187 for i := 0; i < b.N; i++ {
188 x = x[0:0]
189 for j := 0; j < N; j++ {
190 if len(x) < cap(x) {
191 x = x[:len(x)+1]
192 x[len(x)-1] = j
193 } else {
194 x = append(x, j)
// x is package-level so that f's write to it is an observable side effect
// while TestSideEffectOrder's append is evaluating its arguments.
var x []int
202 func f() int {
203 x[:1][0] = 3
204 return 2
207 func TestSideEffectOrder(t *testing.T) {
208 x = make([]int, 0, 10)
209 x = append(x, 1, f())
210 if x[0] != 1 || x[1] != 2 {
211 t.Error("append failed: ", x[0], x[1])
// TestAppendOverlap verifies that append copies correctly when source and
// destination share a backing array and the destination starts later.
func TestAppendOverlap(t *testing.T) {
	x := []byte("1234")
	x = append(x[1:], x...) // p > q in runtime·appendslice.
	got := string(x)
	want := "2341234"
	if got != want {
		t.Errorf("overlap failed: got %q want %q", got, want)
	}
}
// BenchmarkCopy measures copy into a byte buffer from a []byte source and
// from a string source, for several source lengths.
func BenchmarkCopy(b *testing.B) {
	for _, l := range []int{1, 2, 4, 8, 12, 16, 32, 128, 1024} {
		buf := make([]byte, 4096)
		b.Run(fmt.Sprint(l, "Byte"), func(b *testing.B) {
			s := make([]byte, l)
			var n int
			for i := 0; i < b.N; i++ {
				n = copy(buf, s)
			}
			b.SetBytes(int64(n))
		})
		b.Run(fmt.Sprint(l, "String"), func(b *testing.B) {
			s := string(make([]byte, l))
			var n int
			for i := 0; i < b.N; i++ {
				n = copy(buf, s)
			}
			b.SetBytes(int64(n))
		})
	}
}
// Sinks for BenchmarkAppendInPlace. They are package-level so the slices are
// referred to by address, which is what enables the in-place append
// optimization being benchmarked.
var (
	sByte []byte
	s1Ptr []uintptr
	s2Ptr [][2]uintptr
	s3Ptr [][3]uintptr
	s4Ptr [][4]uintptr
)
255 // BenchmarkAppendInPlace tests the performance of append
256 // when the result is being written back to the same slice.
257 // In order for the in-place optimization to occur,
258 // the slice must be referred to by address;
259 // using a global is an easy way to trigger that.
260 // We test the "grow" and "no grow" paths separately,
261 // but not the "normal" (occasionally grow) path,
262 // because it is a blend of the other two.
263 // We use small numbers and small sizes in an attempt
264 // to avoid benchmarking memory allocation and copying.
265 // We use scalars instead of pointers in an attempt
266 // to avoid benchmarking the write barriers.
267 // We benchmark four common sizes (byte, pointer, string/interface, slice),
268 // and one larger size.
269 func BenchmarkAppendInPlace(b *testing.B) {
270 b.Run("NoGrow", func(b *testing.B) {
271 const C = 128
273 b.Run("Byte", func(b *testing.B) {
274 for i := 0; i < b.N; i++ {
275 sByte = make([]byte, C)
276 for j := 0; j < C; j++ {
277 sByte = append(sByte, 0x77)
282 b.Run("1Ptr", func(b *testing.B) {
283 for i := 0; i < b.N; i++ {
284 s1Ptr = make([]uintptr, C)
285 for j := 0; j < C; j++ {
286 s1Ptr = append(s1Ptr, 0x77)
291 b.Run("2Ptr", func(b *testing.B) {
292 for i := 0; i < b.N; i++ {
293 s2Ptr = make([][2]uintptr, C)
294 for j := 0; j < C; j++ {
295 s2Ptr = append(s2Ptr, [2]uintptr{0x77, 0x88})
300 b.Run("3Ptr", func(b *testing.B) {
301 for i := 0; i < b.N; i++ {
302 s3Ptr = make([][3]uintptr, C)
303 for j := 0; j < C; j++ {
304 s3Ptr = append(s3Ptr, [3]uintptr{0x77, 0x88, 0x99})
309 b.Run("4Ptr", func(b *testing.B) {
310 for i := 0; i < b.N; i++ {
311 s4Ptr = make([][4]uintptr, C)
312 for j := 0; j < C; j++ {
313 s4Ptr = append(s4Ptr, [4]uintptr{0x77, 0x88, 0x99, 0xAA})
320 b.Run("Grow", func(b *testing.B) {
321 const C = 5
323 b.Run("Byte", func(b *testing.B) {
324 for i := 0; i < b.N; i++ {
325 sByte = make([]byte, 0)
326 for j := 0; j < C; j++ {
327 sByte = append(sByte, 0x77)
328 sByte = sByte[:cap(sByte)]
333 b.Run("1Ptr", func(b *testing.B) {
334 for i := 0; i < b.N; i++ {
335 s1Ptr = make([]uintptr, 0)
336 for j := 0; j < C; j++ {
337 s1Ptr = append(s1Ptr, 0x77)
338 s1Ptr = s1Ptr[:cap(s1Ptr)]
343 b.Run("2Ptr", func(b *testing.B) {
344 for i := 0; i < b.N; i++ {
345 s2Ptr = make([][2]uintptr, 0)
346 for j := 0; j < C; j++ {
347 s2Ptr = append(s2Ptr, [2]uintptr{0x77, 0x88})
348 s2Ptr = s2Ptr[:cap(s2Ptr)]
353 b.Run("3Ptr", func(b *testing.B) {
354 for i := 0; i < b.N; i++ {
355 s3Ptr = make([][3]uintptr, 0)
356 for j := 0; j < C; j++ {
357 s3Ptr = append(s3Ptr, [3]uintptr{0x77, 0x88, 0x99})
358 s3Ptr = s3Ptr[:cap(s3Ptr)]
363 b.Run("4Ptr", func(b *testing.B) {
364 for i := 0; i < b.N; i++ {
365 s4Ptr = make([][4]uintptr, 0)
366 for j := 0; j < C; j++ {
367 s4Ptr = append(s4Ptr, [4]uintptr{0x77, 0x88, 0x99, 0xAA})
368 s4Ptr = s4Ptr[:cap(s4Ptr)]