// Origin: libgo/go/runtime/memmove_test.go (gccgo mirror of the Go runtime tests).
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"internal/race"
	. "runtime"
	"testing"
)
// TestMemmove exhaustively checks forward copies between two distinct
// buffers: for every length n and every src/dst offset pair it verifies
// that exactly the requested bytes were copied and that the bytes before
// and after the destination window are untouched.
func TestMemmove(t *testing.T) {
	t.Parallel()
	size := 256
	if testing.Short() {
		size = 128 + 16
	}
	src := make([]byte, size)
	dst := make([]byte, size)
	// src bytes have the high bit set and dst bytes do not, so any
	// misplaced byte is immediately distinguishable.
	for i := 0; i < size; i++ {
		src[i] = byte(128 + (i & 127))
	}
	for i := 0; i < size; i++ {
		dst[i] = byte(i & 127)
	}
	for n := 0; n <= size; n++ {
		for x := 0; x <= size-n; x++ { // offset in src
			for y := 0; y <= size-n; y++ { // offset in dst
				copy(dst[y:y+n], src[x:x+n])
				for i := 0; i < y; i++ {
					if dst[i] != byte(i&127) {
						t.Fatalf("prefix dst[%d] = %d", i, dst[i])
					}
				}
				for i := y; i < y+n; i++ {
					if dst[i] != byte(128+((i-y+x)&127)) {
						t.Fatalf("copied dst[%d] = %d", i, dst[i])
					}
					dst[i] = byte(i & 127) // reset dst
				}
				for i := y + n; i < size; i++ {
					if dst[i] != byte(i&127) {
						t.Fatalf("suffix dst[%d] = %d", i, dst[i])
					}
				}
			}
		}
	}
}
// TestMemmoveAlias exhaustively checks copies within a single buffer
// (overlapping source and destination) for every length and offset pair,
// verifying the copied window and that the surrounding bytes are intact.
func TestMemmoveAlias(t *testing.T) {
	t.Parallel()
	size := 256
	if testing.Short() {
		size = 128 + 16
	}
	buf := make([]byte, size)
	for i := 0; i < size; i++ {
		buf[i] = byte(i)
	}
	for n := 0; n <= size; n++ {
		for x := 0; x <= size-n; x++ { // src offset
			for y := 0; y <= size-n; y++ { // dst offset
				copy(buf[y:y+n], buf[x:x+n])
				for i := 0; i < y; i++ {
					if buf[i] != byte(i) {
						t.Fatalf("prefix buf[%d] = %d", i, buf[i])
					}
				}
				for i := y; i < y+n; i++ {
					if buf[i] != byte(i-y+x) {
						t.Fatalf("copied buf[%d] = %d", i, buf[i])
					}
					buf[i] = byte(i) // reset buf
				}
				for i := y + n; i < size; i++ {
					if buf[i] != byte(i) {
						t.Fatalf("suffix buf[%d] = %d", i, buf[i])
					}
				}
			}
		}
	}
}
90 func TestMemmoveLarge0x180000(t *testing.T) {
91 t.Parallel()
92 if race.Enabled {
93 t.Skip("skipping large memmove test under race detector")
95 testSize(t, 0x180000)
98 func TestMemmoveOverlapLarge0x120000(t *testing.T) {
99 t.Parallel()
100 if race.Enabled {
101 t.Skip("skipping large memmove test under race detector")
103 testOverlap(t, 0x120000)
106 func testSize(t *testing.T, size int) {
107 src := make([]byte, size)
108 dst := make([]byte, size)
109 _, _ = rand.Read(src)
110 _, _ = rand.Read(dst)
112 ref := make([]byte, size)
113 copyref(ref, dst)
115 for n := size - 50; n > 1; n >>= 1 {
116 for x := 0; x <= size-n; x = x*7 + 1 { // offset in src
117 for y := 0; y <= size-n; y = y*9 + 1 { // offset in dst
118 copy(dst[y:y+n], src[x:x+n])
119 copyref(ref[y:y+n], src[x:x+n])
120 p := cmpb(dst, ref)
121 if p >= 0 {
122 t.Fatalf("Copy failed, copying from src[%d:%d] to dst[%d:%d].\nOffset %d is different, %v != %v", x, x+n, y, y+n, p, dst[p], ref[p])
129 func testOverlap(t *testing.T, size int) {
130 src := make([]byte, size)
131 test := make([]byte, size)
132 ref := make([]byte, size)
133 _, _ = rand.Read(src)
135 for n := size - 50; n > 1; n >>= 1 {
136 for x := 0; x <= size-n; x = x*7 + 1 { // offset in src
137 for y := 0; y <= size-n; y = y*9 + 1 { // offset in dst
138 // Reset input
139 copyref(test, src)
140 copyref(ref, src)
141 copy(test[y:y+n], test[x:x+n])
142 if y <= x {
143 copyref(ref[y:y+n], ref[x:x+n])
144 } else {
145 copybw(ref[y:y+n], ref[x:x+n])
147 p := cmpb(test, ref)
148 if p >= 0 {
149 t.Fatalf("Copy failed, copying from src[%d:%d] to dst[%d:%d].\nOffset %d is different, %v != %v", x, x+n, y, y+n, p, test[p], ref[p])
157 // Forward copy.
158 func copyref(dst, src []byte) {
159 for i, v := range src {
160 dst[i] = v
164 // Backwards copy
165 func copybw(dst, src []byte) {
166 if len(src) == 0 {
167 return
169 for i := len(src) - 1; i >= 0; i-- {
170 dst[i] = src[i]
174 // Returns offset of difference
175 func matchLen(a, b []byte, max int) int {
176 a = a[:max]
177 b = b[:max]
178 for i, av := range a {
179 if b[i] != av {
180 return i
183 return max
186 func cmpb(a, b []byte) int {
187 l := matchLen(a, b, len(a))
188 if l == len(a) {
189 return -1
191 return l
// benchmarkSizes runs fn as a named sub-benchmark once per entry in
// sizes, reporting throughput via SetBytes.
func benchmarkSizes(b *testing.B, sizes []int, fn func(b *testing.B, n int)) {
	for _, n := range sizes {
		b.Run(fmt.Sprint(n), func(b *testing.B) {
			b.SetBytes(int64(n))
			fn(b, n)
		})
	}
}
203 var bufSizes = []int{
204 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
205 32, 64, 128, 256, 512, 1024, 2048, 4096,
208 func BenchmarkMemmove(b *testing.B) {
209 benchmarkSizes(b, bufSizes, func(b *testing.B, n int) {
210 x := make([]byte, n)
211 y := make([]byte, n)
212 for i := 0; i < b.N; i++ {
213 copy(x, y)
218 func BenchmarkMemmoveUnalignedDst(b *testing.B) {
219 benchmarkSizes(b, bufSizes, func(b *testing.B, n int) {
220 x := make([]byte, n+1)
221 y := make([]byte, n)
222 for i := 0; i < b.N; i++ {
223 copy(x[1:], y)
228 func BenchmarkMemmoveUnalignedSrc(b *testing.B) {
229 benchmarkSizes(b, bufSizes, func(b *testing.B, n int) {
230 x := make([]byte, n)
231 y := make([]byte, n+1)
232 for i := 0; i < b.N; i++ {
233 copy(x, y[1:])
238 func TestMemclr(t *testing.T) {
239 size := 512
240 if testing.Short() {
241 size = 128 + 16
243 mem := make([]byte, size)
244 for i := 0; i < size; i++ {
245 mem[i] = 0xee
247 for n := 0; n < size; n++ {
248 for x := 0; x <= size-n; x++ { // offset in mem
249 MemclrBytes(mem[x : x+n])
250 for i := 0; i < x; i++ {
251 if mem[i] != 0xee {
252 t.Fatalf("overwrite prefix mem[%d] = %d", i, mem[i])
255 for i := x; i < x+n; i++ {
256 if mem[i] != 0 {
257 t.Fatalf("failed clear mem[%d] = %d", i, mem[i])
259 mem[i] = 0xee
261 for i := x + n; i < size; i++ {
262 if mem[i] != 0xee {
263 t.Fatalf("overwrite suffix mem[%d] = %d", i, mem[i])
270 func BenchmarkMemclr(b *testing.B) {
271 for _, n := range []int{5, 16, 64, 256, 4096, 65536} {
272 x := make([]byte, n)
273 b.Run(fmt.Sprint(n), func(b *testing.B) {
274 b.SetBytes(int64(n))
275 for i := 0; i < b.N; i++ {
276 MemclrBytes(x)
280 for _, m := range []int{1, 4, 8, 16, 64} {
281 x := make([]byte, m<<20)
282 b.Run(fmt.Sprint(m, "M"), func(b *testing.B) {
283 b.SetBytes(int64(m << 20))
284 for i := 0; i < b.N; i++ {
285 MemclrBytes(x)
291 func BenchmarkGoMemclr(b *testing.B) {
292 benchmarkSizes(b, []int{5, 16, 64, 256}, func(b *testing.B, n int) {
293 x := make([]byte, n)
294 for i := 0; i < b.N; i++ {
295 for j := range x {
296 x[j] = 0
// The BenchmarkClearFat* family measures the compiler-generated code
// for zeroing a fixed-size stack value of N bytes (the arrays are
// declared as [N/4]uint32). The blank assignment keeps x live so the
// zeroing is not optimized away entirely.

func BenchmarkClearFat8(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [8 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat12(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [12 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat16(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [16 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat24(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [24 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat32(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [32 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat40(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [40 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat48(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [48 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat56(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [56 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat64(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [64 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat128(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [128 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat256(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [256 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat512(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [512 / 4]uint32
		_ = x
	}
}
func BenchmarkClearFat1024(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [1024 / 4]uint32
		_ = x
	}
}
// The BenchmarkCopyFat* family measures the compiler-generated code for
// copying a fixed-size value of N bytes (declared as [N/4]uint32) via
// plain assignment. The blank assignment keeps y live.

func BenchmarkCopyFat8(b *testing.B) {
	var x [8 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
func BenchmarkCopyFat12(b *testing.B) {
	var x [12 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
func BenchmarkCopyFat16(b *testing.B) {
	var x [16 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
func BenchmarkCopyFat24(b *testing.B) {
	var x [24 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
func BenchmarkCopyFat32(b *testing.B) {
	var x [32 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
func BenchmarkCopyFat64(b *testing.B) {
	var x [64 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
func BenchmarkCopyFat128(b *testing.B) {
	var x [128 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
func BenchmarkCopyFat256(b *testing.B) {
	var x [256 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
func BenchmarkCopyFat512(b *testing.B) {
	var x [512 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
func BenchmarkCopyFat1024(b *testing.B) {
	var x [1024 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
452 func BenchmarkIssue18740(b *testing.B) {
453 // This tests that memmove uses one 4-byte load/store to move 4 bytes.
454 // It used to do 2 2-byte load/stores, which leads to a pipeline stall
455 // when we try to read the result with one 4-byte load.
456 var buf [4]byte
457 for j := 0; j < b.N; j++ {
458 s := uint32(0)
459 for i := 0; i < 4096; i += 4 {
460 copy(buf[:], g[i:])
461 s += binary.LittleEndian.Uint32(buf[:])
463 sink = uint64(s)
467 // TODO: 2 byte and 8 byte benchmarks also.
469 var g [4096]byte