// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TestMemmove exhaustively checks copy (memmove) for every combination of
// length, src offset, and dst offset within a small buffer, verifying that
// exactly the requested bytes are moved and nothing outside the destination
// range is disturbed. src bytes carry the high bit (128+i&127) while dst
// bytes do not (i&127), so copied and untouched regions are distinguishable.
func TestMemmove(t *testing.T) {
	t.Parallel()
	// NOTE(review): the original buffer-size setup was lost in extraction;
	// 256 (128+16 under -short) matches the loop bounds used below — confirm
	// against the original file.
	size := 256
	if testing.Short() {
		size = 128 + 16
	}
	src := make([]byte, size)
	dst := make([]byte, size)
	for i := 0; i < size; i++ {
		src[i] = byte(128 + (i & 127))
	}
	for i := 0; i < size; i++ {
		dst[i] = byte(i & 127)
	}
	for n := 0; n <= size; n++ {
		for x := 0; x <= size-n; x++ { // offset in src
			for y := 0; y <= size-n; y++ { // offset in dst
				copy(dst[y:y+n], src[x:x+n])
				// Bytes before the destination window must be untouched.
				for i := 0; i < y; i++ {
					if dst[i] != byte(i&127) {
						t.Fatalf("prefix dst[%d] = %d", i, dst[i])
					}
				}
				// The window itself must hold the src bytes.
				for i := y; i < y+n; i++ {
					if dst[i] != byte(128+((i-y+x)&127)) {
						t.Fatalf("copied dst[%d] = %d", i, dst[i])
					}
					dst[i] = byte(i & 127) // reset dst
				}
				// Bytes after the window must be untouched.
				for i := y + n; i < size; i++ {
					if dst[i] != byte(i&127) {
						t.Fatalf("suffix dst[%d] = %d", i, dst[i])
					}
				}
			}
		}
	}
}
// TestMemmoveAlias checks copy when source and destination overlap inside
// the same buffer, for every combination of length, src offset, and dst
// offset. After each copy the buffer is verified against the expected
// overlap semantics and then reset.
func TestMemmoveAlias(t *testing.T) {
	t.Parallel()
	// NOTE(review): original size setup lost in extraction; mirrored from
	// TestMemmove — confirm against the original file.
	size := 256
	if testing.Short() {
		size = 128 + 16
	}
	buf := make([]byte, size)
	for i := 0; i < size; i++ {
		buf[i] = byte(i)
	}
	for n := 0; n <= size; n++ {
		for x := 0; x <= size-n; x++ { // src offset
			for y := 0; y <= size-n; y++ { // dst offset
				copy(buf[y:y+n], buf[x:x+n])
				// Prefix before dst window must be untouched.
				for i := 0; i < y; i++ {
					if buf[i] != byte(i) {
						t.Fatalf("prefix buf[%d] = %d", i, buf[i])
					}
				}
				// dst window must hold the original src bytes.
				for i := y; i < y+n; i++ {
					if buf[i] != byte(i-y+x) {
						t.Fatalf("copied buf[%d] = %d", i, buf[i])
					}
					buf[i] = byte(i) // reset buf
				}
				// Suffix after dst window must be untouched.
				for i := y + n; i < size; i++ {
					if buf[i] != byte(i) {
						t.Fatalf("suffix buf[%d] = %d", i, buf[i])
					}
				}
			}
		}
	}
}
99 func TestMemmoveLarge0x180000(t
*testing
.T
) {
100 if testing
.Short() && testenv
.Builder() == "" {
106 t
.Skip("skipping large memmove test under race detector")
108 testSize(t
, 0x180000)
111 func TestMemmoveOverlapLarge0x120000(t
*testing
.T
) {
112 if testing
.Short() && testenv
.Builder() == "" {
118 t
.Skip("skipping large memmove test under race detector")
120 testOverlap(t
, 0x120000)
123 func testSize(t
*testing
.T
, size
int) {
124 src
:= make([]byte, size
)
125 dst
:= make([]byte, size
)
126 _
, _
= rand
.Read(src
)
127 _
, _
= rand
.Read(dst
)
129 ref
:= make([]byte, size
)
132 for n
:= size
- 50; n
> 1; n
>>= 1 {
133 for x
:= 0; x
<= size
-n
; x
= x
*7 + 1 { // offset in src
134 for y
:= 0; y
<= size
-n
; y
= y
*9 + 1 { // offset in dst
135 copy(dst
[y
:y
+n
], src
[x
:x
+n
])
136 copyref(ref
[y
:y
+n
], src
[x
:x
+n
])
139 t
.Fatalf("Copy failed, copying from src[%d:%d] to dst[%d:%d].\nOffset %d is different, %v != %v", x
, x
+n
, y
, y
+n
, p
, dst
[p
], ref
[p
])
146 func testOverlap(t
*testing
.T
, size
int) {
147 src
:= make([]byte, size
)
148 test
:= make([]byte, size
)
149 ref
:= make([]byte, size
)
150 _
, _
= rand
.Read(src
)
152 for n
:= size
- 50; n
> 1; n
>>= 1 {
153 for x
:= 0; x
<= size
-n
; x
= x
*7 + 1 { // offset in src
154 for y
:= 0; y
<= size
-n
; y
= y
*9 + 1 { // offset in dst
158 copy(test
[y
:y
+n
], test
[x
:x
+n
])
160 copyref(ref
[y
:y
+n
], ref
[x
:x
+n
])
162 copybw(ref
[y
:y
+n
], ref
[x
:x
+n
])
166 t
.Fatalf("Copy failed, copying from src[%d:%d] to dst[%d:%d].\nOffset %d is different, %v != %v", x
, x
+n
, y
, y
+n
, p
, test
[p
], ref
[p
])
// copyref is a simple forward byte-at-a-time copy used as the reference
// implementation against which the optimized memmove is compared.
func copyref(dst, src []byte) {
	for i, v := range src {
		dst[i] = v
	}
}
// copybw is a backward (high-to-low index) byte-at-a-time copy. It gives
// the correct result when dst overlaps src and starts after it, which is
// why testOverlap uses it as the reference in that direction.
func copybw(dst, src []byte) {
	for i := len(src) - 1; i >= 0; i-- {
		dst[i] = src[i]
	}
}
// matchLen returns the number of leading bytes (up to max) at which a and b
// agree — equivalently, the offset of the first difference, or max if the
// first max bytes are identical. a must have at least max bytes; b at least
// as many as are compared.
func matchLen(a, b []byte, max int) int {
	a = a[:max]
	b = b[:max]
	for i, av := range a {
		if b[i] != av {
			return i
		}
	}
	return max
}
203 func cmpb(a
, b
[]byte) int {
204 l
:= matchLen(a
, b
, len(a
))
211 // Ensure that memmove writes pointers atomically, so the GC won't
212 // observe a partially updated pointer.
213 func TestMemmoveAtomicity(t
*testing
.T
) {
215 t
.Skip("skip under the race detector -- this test is intentionally racy")
220 for _
, backward
:= range []bool{true, false} {
221 for _
, n
:= range []int{3, 4, 5, 6, 7, 8, 9, 10, 15, 25, 49} {
224 // test copying [N]*int.
225 sz
:= uintptr(n
* PtrSize
)
226 name
:= fmt
.Sprint(sz
)
232 t
.Run(name
, func(t
*testing
.T
) {
233 // Use overlapping src and dst to force forward/backward copy.
235 src
:= s
[n
-1 : 2*n
-1]
249 sp
:= unsafe
.Pointer(&src
[0])
250 dp
:= unsafe
.Pointer(&dst
[0])
251 atomic
.StoreUint32(&ready
, 1)
252 for i
:= 0; i
< 10000; i
++ {
254 MemclrNoHeapPointers(dp
, sz
)
256 atomic
.StoreUint32(&ready
, 2)
259 for atomic
.LoadUint32(&ready
) == 0 {
263 for atomic
.LoadUint32(&ready
) != 2 {
266 if p
!= nil && p
!= &x
{
267 t
.Fatalf("got partially updated pointer %p at dst[%d], want either nil or %p", p
, i
, &x
)
// benchmarkSizes runs fn as a named sub-benchmark for each size in sizes,
// setting the benchmark's byte count so results report MB/s.
func benchmarkSizes(b *testing.B, sizes []int, fn func(b *testing.B, n int)) {
	for _, n := range sizes {
		b.Run(fmt.Sprint(n), func(b *testing.B) {
			b.SetBytes(int64(n))
			fn(b, n)
		})
	}
}
// bufSizes is the set of copy lengths exercised by the memmove benchmarks:
// every length 0–16 plus powers of two up to 4096.
var bufSizes = []int{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	32, 64, 128, 256, 512, 1024, 2048, 4096,
}
290 func BenchmarkMemmove(b
*testing
.B
) {
291 benchmarkSizes(b
, bufSizes
, func(b
*testing
.B
, n
int) {
294 for i
:= 0; i
< b
.N
; i
++ {
300 func BenchmarkMemmoveUnalignedDst(b
*testing
.B
) {
301 benchmarkSizes(b
, bufSizes
, func(b
*testing
.B
, n
int) {
302 x
:= make([]byte, n
+1)
304 for i
:= 0; i
< b
.N
; i
++ {
310 func BenchmarkMemmoveUnalignedSrc(b
*testing
.B
) {
311 benchmarkSizes(b
, bufSizes
, func(b
*testing
.B
, n
int) {
313 y
:= make([]byte, n
+1)
314 for i
:= 0; i
< b
.N
; i
++ {
320 func TestMemclr(t
*testing
.T
) {
325 mem
:= make([]byte, size
)
326 for i
:= 0; i
< size
; i
++ {
329 for n
:= 0; n
< size
; n
++ {
330 for x
:= 0; x
<= size
-n
; x
++ { // offset in mem
331 MemclrBytes(mem
[x
: x
+n
])
332 for i
:= 0; i
< x
; i
++ {
334 t
.Fatalf("overwrite prefix mem[%d] = %d", i
, mem
[i
])
337 for i
:= x
; i
< x
+n
; i
++ {
339 t
.Fatalf("failed clear mem[%d] = %d", i
, mem
[i
])
343 for i
:= x
+ n
; i
< size
; i
++ {
345 t
.Fatalf("overwrite suffix mem[%d] = %d", i
, mem
[i
])
352 func BenchmarkMemclr(b
*testing
.B
) {
353 for _
, n
:= range []int{5, 16, 64, 256, 4096, 65536} {
355 b
.Run(fmt
.Sprint(n
), func(b
*testing
.B
) {
357 for i
:= 0; i
< b
.N
; i
++ {
362 for _
, m
:= range []int{1, 4, 8, 16, 64} {
363 x
:= make([]byte, m
<<20)
364 b
.Run(fmt
.Sprint(m
, "M"), func(b
*testing
.B
) {
365 b
.SetBytes(int64(m
<< 20))
366 for i
:= 0; i
< b
.N
; i
++ {
373 func BenchmarkGoMemclr(b
*testing
.B
) {
374 benchmarkSizes(b
, []int{5, 16, 64, 256}, func(b
*testing
.B
, n
int) {
376 for i
:= 0; i
< b
.N
; i
++ {
// BenchmarkClearFat8 measures zeroing an 8-byte stack value (the compiler's
// "clearfat" code path). Loop body reconstructed — confirm against original.
func BenchmarkClearFat8(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [8 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat12 measures zeroing a 12-byte stack value.
func BenchmarkClearFat12(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [12 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat16 measures zeroing a 16-byte stack value.
func BenchmarkClearFat16(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [16 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat24 measures zeroing a 24-byte stack value.
func BenchmarkClearFat24(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [24 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat32 measures zeroing a 32-byte stack value.
func BenchmarkClearFat32(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [32 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat40 measures zeroing a 40-byte stack value.
func BenchmarkClearFat40(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [40 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat48 measures zeroing a 48-byte stack value.
func BenchmarkClearFat48(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [48 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat56 measures zeroing a 56-byte stack value.
func BenchmarkClearFat56(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [56 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat64 measures zeroing a 64-byte stack value.
func BenchmarkClearFat64(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [64 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat128 measures zeroing a 128-byte stack value.
func BenchmarkClearFat128(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [128 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat256 measures zeroing a 256-byte stack value.
func BenchmarkClearFat256(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [256 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat512 measures zeroing a 512-byte stack value.
func BenchmarkClearFat512(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [512 / 4]uint32
		_ = x
	}
}
// BenchmarkClearFat1024 measures zeroing a 1024-byte stack value.
func BenchmarkClearFat1024(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [1024 / 4]uint32
		_ = x
	}
}
// BenchmarkCopyFat8 measures copying an 8-byte value by plain assignment
// (the compiler's "copyfat" path). Body reconstructed — confirm against
// the original file.
func BenchmarkCopyFat8(b *testing.B) {
	var x [8 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
// BenchmarkCopyFat12 measures copying a 12-byte value by assignment.
func BenchmarkCopyFat12(b *testing.B) {
	var x [12 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
// BenchmarkCopyFat16 measures copying a 16-byte value by assignment.
func BenchmarkCopyFat16(b *testing.B) {
	var x [16 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
// BenchmarkCopyFat24 measures copying a 24-byte value by assignment.
func BenchmarkCopyFat24(b *testing.B) {
	var x [24 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
// BenchmarkCopyFat32 measures copying a 32-byte value by assignment.
func BenchmarkCopyFat32(b *testing.B) {
	var x [32 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
// BenchmarkCopyFat64 measures copying a 64-byte value by assignment.
func BenchmarkCopyFat64(b *testing.B) {
	var x [64 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
// BenchmarkCopyFat128 measures copying a 128-byte value by assignment.
func BenchmarkCopyFat128(b *testing.B) {
	var x [128 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
// BenchmarkCopyFat256 measures copying a 256-byte value by assignment.
func BenchmarkCopyFat256(b *testing.B) {
	var x [256 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
// BenchmarkCopyFat512 measures copying a 512-byte value by assignment.
func BenchmarkCopyFat512(b *testing.B) {
	var x [512 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
// BenchmarkCopyFat520 measures copying a 520-byte value by assignment
// (a non-power-of-two size just past 512).
func BenchmarkCopyFat520(b *testing.B) {
	var x [520 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
// BenchmarkCopyFat1024 measures copying a 1024-byte value by assignment.
func BenchmarkCopyFat1024(b *testing.B) {
	var x [1024 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
// BenchmarkIssue18740 reads a large buffer 4 bytes at a time via copy and
// then loads the result as a uint32.
func BenchmarkIssue18740(b *testing.B) {
	// This tests that memmove uses one 4-byte load/store to move 4 bytes.
	// It used to do 2 2-byte load/stores, which leads to a pipeline stall
	// when we try to read the result with one 4-byte load.
	// NOTE(review): the source buffer and result-sink lines were lost in
	// extraction; upstream uses package-level variables for them — this
	// reconstruction keeps the buffer local and keeps the sum live via the
	// conditional Log below. Confirm against the original file.
	var data [4096]byte
	var buf [4]byte
	for j := 0; j < b.N; j++ {
		s := uint32(0)
		for i := 0; i < 4096; i += 4 {
			copy(buf[:], data[i:])
			s += binary.LittleEndian.Uint32(buf[:])
		}
		if s == 0xffffffff {
			// Unreachable for zeroed data; prevents dead-code elimination.
			b.Log(s)
		}
	}
}

// TODO: 2 byte and 8 byte benchmarks also.