libstdc++: Avoid -Wmaybe-uninitialized warnings in text_encoding.cc
[official-gcc.git] / libgo / go / runtime / memmove_test.go
blob7c9d2ada45fd78d3d88be35fe4616a085e2b3715
1 // Copyright 2013 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime_test
7 import (
8 "crypto/rand"
9 "encoding/binary"
10 "fmt"
11 "internal/race"
12 "internal/testenv"
13 . "runtime"
14 "sync/atomic"
15 "testing"
16 "unsafe"
19 func TestMemmove(t *testing.T) {
20 if *flagQuick {
21 t.Skip("-quick")
23 t.Parallel()
24 size := 256
25 if testing.Short() {
26 size = 128 + 16
28 src := make([]byte, size)
29 dst := make([]byte, size)
30 for i := 0; i < size; i++ {
31 src[i] = byte(128 + (i & 127))
33 for i := 0; i < size; i++ {
34 dst[i] = byte(i & 127)
36 for n := 0; n <= size; n++ {
37 for x := 0; x <= size-n; x++ { // offset in src
38 for y := 0; y <= size-n; y++ { // offset in dst
39 copy(dst[y:y+n], src[x:x+n])
40 for i := 0; i < y; i++ {
41 if dst[i] != byte(i&127) {
42 t.Fatalf("prefix dst[%d] = %d", i, dst[i])
45 for i := y; i < y+n; i++ {
46 if dst[i] != byte(128+((i-y+x)&127)) {
47 t.Fatalf("copied dst[%d] = %d", i, dst[i])
49 dst[i] = byte(i & 127) // reset dst
51 for i := y + n; i < size; i++ {
52 if dst[i] != byte(i&127) {
53 t.Fatalf("suffix dst[%d] = %d", i, dst[i])
61 func TestMemmoveAlias(t *testing.T) {
62 if *flagQuick {
63 t.Skip("-quick")
65 t.Parallel()
66 size := 256
67 if testing.Short() {
68 size = 128 + 16
70 buf := make([]byte, size)
71 for i := 0; i < size; i++ {
72 buf[i] = byte(i)
74 for n := 0; n <= size; n++ {
75 for x := 0; x <= size-n; x++ { // src offset
76 for y := 0; y <= size-n; y++ { // dst offset
77 copy(buf[y:y+n], buf[x:x+n])
78 for i := 0; i < y; i++ {
79 if buf[i] != byte(i) {
80 t.Fatalf("prefix buf[%d] = %d", i, buf[i])
83 for i := y; i < y+n; i++ {
84 if buf[i] != byte(i-y+x) {
85 t.Fatalf("copied buf[%d] = %d", i, buf[i])
87 buf[i] = byte(i) // reset buf
89 for i := y + n; i < size; i++ {
90 if buf[i] != byte(i) {
91 t.Fatalf("suffix buf[%d] = %d", i, buf[i])
// TestMemmoveLarge0x180000 runs the non-overlapping copy check
// (testSize) on a 0x180000-byte (1.5 MiB) buffer. Skipped in -short
// mode when not on a builder, and skipped entirely under the race
// detector (see skip message).
func TestMemmoveLarge0x180000(t *testing.T) {
	if testing.Short() && testenv.Builder() == "" {
		t.Skip("-short")
	}

	t.Parallel()
	if race.Enabled {
		t.Skip("skipping large memmove test under race detector")
	}
	testSize(t, 0x180000)
}
// TestMemmoveOverlapLarge0x120000 runs the overlapping copy check
// (testOverlap) on a 0x120000-byte (1.125 MiB) buffer. Skipped in
// -short mode when not on a builder, and skipped entirely under the
// race detector (see skip message).
func TestMemmoveOverlapLarge0x120000(t *testing.T) {
	if testing.Short() && testenv.Builder() == "" {
		t.Skip("-short")
	}

	t.Parallel()
	if race.Enabled {
		t.Skip("skipping large memmove test under race detector")
	}
	testOverlap(t, 0x120000)
}
123 func testSize(t *testing.T, size int) {
124 src := make([]byte, size)
125 dst := make([]byte, size)
126 _, _ = rand.Read(src)
127 _, _ = rand.Read(dst)
129 ref := make([]byte, size)
130 copyref(ref, dst)
132 for n := size - 50; n > 1; n >>= 1 {
133 for x := 0; x <= size-n; x = x*7 + 1 { // offset in src
134 for y := 0; y <= size-n; y = y*9 + 1 { // offset in dst
135 copy(dst[y:y+n], src[x:x+n])
136 copyref(ref[y:y+n], src[x:x+n])
137 p := cmpb(dst, ref)
138 if p >= 0 {
139 t.Fatalf("Copy failed, copying from src[%d:%d] to dst[%d:%d].\nOffset %d is different, %v != %v", x, x+n, y, y+n, p, dst[p], ref[p])
146 func testOverlap(t *testing.T, size int) {
147 src := make([]byte, size)
148 test := make([]byte, size)
149 ref := make([]byte, size)
150 _, _ = rand.Read(src)
152 for n := size - 50; n > 1; n >>= 1 {
153 for x := 0; x <= size-n; x = x*7 + 1 { // offset in src
154 for y := 0; y <= size-n; y = y*9 + 1 { // offset in dst
155 // Reset input
156 copyref(test, src)
157 copyref(ref, src)
158 copy(test[y:y+n], test[x:x+n])
159 if y <= x {
160 copyref(ref[y:y+n], ref[x:x+n])
161 } else {
162 copybw(ref[y:y+n], ref[x:x+n])
164 p := cmpb(test, ref)
165 if p >= 0 {
166 t.Fatalf("Copy failed, copying from src[%d:%d] to dst[%d:%d].\nOffset %d is different, %v != %v", x, x+n, y, y+n, p, test[p], ref[p])
// copyref is the forward reference copy: elements move one at a time
// from index 0 upward. dst must be at least len(src) long.
func copyref(dst, src []byte) {
	for i := 0; i < len(src); i++ {
		dst[i] = src[i]
	}
}
// copybw is the backward reference copy: elements move one at a time
// from the highest index down to 0 — the direction an overlapping move
// with dst after src must use to preserve the source bytes.
// dst must be at least len(src) long.
func copybw(dst, src []byte) {
	// No explicit empty-slice guard is needed: for len(src) == 0 the
	// loop starts at i == -1 and the body never runs.
	for i := len(src) - 1; i >= 0; i-- {
		dst[i] = src[i]
	}
}
// matchLen returns the index of the first byte at which a and b differ,
// considering only their first max bytes; it returns max when they
// agree on that whole prefix. Both slices must be at least max long.
func matchLen(a, b []byte, max int) int {
	a = a[:max]
	b = b[:max]
	for i := range a {
		if a[i] != b[i] {
			return i
		}
	}
	return max
}

// cmpb compares a and b over len(a) bytes, returning the offset of the
// first difference, or -1 if the prefixes are identical.
func cmpb(a, b []byte) int {
	if l := matchLen(a, b, len(a)); l != len(a) {
		return l
	}
	return -1
}
// Ensure that memmove writes pointers atomically, so the GC won't
// observe a partially updated pointer.
func TestMemmoveAtomicity(t *testing.T) {
	if race.Enabled {
		t.Skip("skip under the race detector -- this test is intentionally racy")
	}

	var x int

	// Run both copy directions; the overlapping slices below make the
	// direction depend on which of src/dst comes first in memory.
	for _, backward := range []bool{true, false} {
		for _, n := range []int{3, 4, 5, 6, 7, 8, 9, 10, 15, 25, 49} {
			n := n // capture per-iteration value for the closure below

			// test copying [N]*int.
			sz := uintptr(n * PtrSize)
			name := fmt.Sprint(sz)
			if backward {
				name += "-backward"
			} else {
				name += "-forward"
			}
			t.Run(name, func(t *testing.T) {
				// Use overlapping src and dst to force forward/backward copy.
				var s [100]*int
				src := s[n-1 : 2*n-1]
				dst := s[:n]
				if backward {
					src, dst = dst, src
				}
				// src slots all point at x; dst slots start nil.
				for i := range src {
					src[i] = &x
				}
				for i := range dst {
					dst[i] = nil
				}

				// ready: 0 = copier not started, 1 = copying, 2 = done.
				var ready uint32
				go func() {
					sp := unsafe.Pointer(&src[0])
					dp := unsafe.Pointer(&dst[0])
					atomic.StoreUint32(&ready, 1)
					// Repeatedly move the pointers in and clear them
					// again so the checker below races against writes.
					for i := 0; i < 10000; i++ {
						Memmove(dp, sp, sz)
						MemclrNoHeapPointers(dp, sz)
					}
					atomic.StoreUint32(&ready, 2)
				}()

				// Wait for the copier goroutine to actually start.
				for atomic.LoadUint32(&ready) == 0 {
					Gosched()
				}

				// While the copier runs, every dst slot must hold either
				// nil or &x; any other value is a torn pointer write.
				for atomic.LoadUint32(&ready) != 2 {
					for i := range dst {
						p := dst[i]
						if p != nil && p != &x {
							t.Fatalf("got partially updated pointer %p at dst[%d], want either nil or %p", p, i, &x)
						}
					}
				}
			})
		}
	}
}
// benchmarkSizes runs fn once per entry in sizes as a sub-benchmark
// named after the size, with throughput reporting enabled via SetBytes.
func benchmarkSizes(b *testing.B, sizes []int, fn func(b *testing.B, n int)) {
	for _, size := range sizes {
		b.Run(fmt.Sprint(size), func(b *testing.B) {
			b.SetBytes(int64(size))
			fn(b, size)
		})
	}
}
// bufSizes lists the copy lengths exercised by the memmove benchmarks:
// every length from 0 through 16, then powers of two up to 4096.
var bufSizes = []int{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	32, 64, 128, 256, 512, 1024, 2048, 4096,
}
// bufSizesOverlap lists the copy lengths used by the overlapping-copy
// benchmarks (sizes below 32 are omitted).
var bufSizesOverlap = []int{
	32, 64, 128, 256, 512, 1024, 2048, 4096,
}
293 func BenchmarkMemmove(b *testing.B) {
294 benchmarkSizes(b, bufSizes, func(b *testing.B, n int) {
295 x := make([]byte, n)
296 y := make([]byte, n)
297 for i := 0; i < b.N; i++ {
298 copy(x, y)
303 func BenchmarkMemmoveOverlap(b *testing.B) {
304 benchmarkSizes(b, bufSizesOverlap, func(b *testing.B, n int) {
305 x := make([]byte, n+16)
306 for i := 0; i < b.N; i++ {
307 copy(x[16:n+16], x[:n])
312 func BenchmarkMemmoveUnalignedDst(b *testing.B) {
313 benchmarkSizes(b, bufSizes, func(b *testing.B, n int) {
314 x := make([]byte, n+1)
315 y := make([]byte, n)
316 for i := 0; i < b.N; i++ {
317 copy(x[1:], y)
322 func BenchmarkMemmoveUnalignedDstOverlap(b *testing.B) {
323 benchmarkSizes(b, bufSizesOverlap, func(b *testing.B, n int) {
324 x := make([]byte, n+16)
325 for i := 0; i < b.N; i++ {
326 copy(x[16:n+16], x[1:n+1])
331 func BenchmarkMemmoveUnalignedSrc(b *testing.B) {
332 benchmarkSizes(b, bufSizes, func(b *testing.B, n int) {
333 x := make([]byte, n)
334 y := make([]byte, n+1)
335 for i := 0; i < b.N; i++ {
336 copy(x, y[1:])
341 func BenchmarkMemmoveUnalignedSrcOverlap(b *testing.B) {
342 benchmarkSizes(b, bufSizesOverlap, func(b *testing.B, n int) {
343 x := make([]byte, n+1)
344 for i := 0; i < b.N; i++ {
345 copy(x[1:n+1], x[:n])
350 func TestMemclr(t *testing.T) {
351 size := 512
352 if testing.Short() {
353 size = 128 + 16
355 mem := make([]byte, size)
356 for i := 0; i < size; i++ {
357 mem[i] = 0xee
359 for n := 0; n < size; n++ {
360 for x := 0; x <= size-n; x++ { // offset in mem
361 MemclrBytes(mem[x : x+n])
362 for i := 0; i < x; i++ {
363 if mem[i] != 0xee {
364 t.Fatalf("overwrite prefix mem[%d] = %d", i, mem[i])
367 for i := x; i < x+n; i++ {
368 if mem[i] != 0 {
369 t.Fatalf("failed clear mem[%d] = %d", i, mem[i])
371 mem[i] = 0xee
373 for i := x + n; i < size; i++ {
374 if mem[i] != 0xee {
375 t.Fatalf("overwrite suffix mem[%d] = %d", i, mem[i])
382 func BenchmarkMemclr(b *testing.B) {
383 for _, n := range []int{5, 16, 64, 256, 4096, 65536} {
384 x := make([]byte, n)
385 b.Run(fmt.Sprint(n), func(b *testing.B) {
386 b.SetBytes(int64(n))
387 for i := 0; i < b.N; i++ {
388 MemclrBytes(x)
392 for _, m := range []int{1, 4, 8, 16, 64} {
393 x := make([]byte, m<<20)
394 b.Run(fmt.Sprint(m, "M"), func(b *testing.B) {
395 b.SetBytes(int64(m << 20))
396 for i := 0; i < b.N; i++ {
397 MemclrBytes(x)
403 func BenchmarkGoMemclr(b *testing.B) {
404 benchmarkSizes(b, []int{5, 16, 64, 256}, func(b *testing.B, n int) {
405 x := make([]byte, n)
406 for i := 0; i < b.N; i++ {
407 for j := range x {
408 x[j] = 0
// The BenchmarkClearFat* family times zeroing a fixed-size value that
// is declared fresh on every iteration (sizes are written as N/4 words
// of uint32, i.e. N bytes). NOTE(review): these appear to target the
// compiler's fixed-size zeroing code generation — the declare-and-
// discard shape is preserved exactly so the measured code is unchanged;
// confirm against the compiler before restructuring.
func BenchmarkClearFat8(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [8 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat12(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [12 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat16(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [16 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat24(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [24 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat32(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [32 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat40(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [40 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat48(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [48 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat56(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [56 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat64(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [64 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat128(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [128 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat256(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [256 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat512(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [512 / 4]uint32
		_ = x
	}
}

func BenchmarkClearFat1024(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x [1024 / 4]uint32
		_ = x
	}
}
// The BenchmarkCopyFat* family times copying a fixed-size value via a
// plain assignment inside the loop. NOTE(review): these appear to
// target the compiler's fixed-size copy code generation — the
// assign-and-discard shape is preserved exactly so the measured code is
// unchanged; confirm against the compiler before restructuring.
func BenchmarkCopyFat8(b *testing.B) {
	var x [8 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}

func BenchmarkCopyFat12(b *testing.B) {
	var x [12 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}

func BenchmarkCopyFat16(b *testing.B) {
	var x [16 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}

func BenchmarkCopyFat24(b *testing.B) {
	var x [24 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}

func BenchmarkCopyFat32(b *testing.B) {
	var x [32 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}

func BenchmarkCopyFat64(b *testing.B) {
	var x [64 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}

func BenchmarkCopyFat128(b *testing.B) {
	var x [128 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}

func BenchmarkCopyFat256(b *testing.B) {
	var x [256 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}

func BenchmarkCopyFat512(b *testing.B) {
	var x [512 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}

func BenchmarkCopyFat520(b *testing.B) {
	var x [520 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}

func BenchmarkCopyFat1024(b *testing.B) {
	var x [1024 / 4]uint32
	for i := 0; i < b.N; i++ {
		y := x
		_ = y
	}
}
571 // BenchmarkIssue18740 ensures that memmove uses 4 and 8 byte load/store to move 4 and 8 bytes.
572 // It used to do 2 2-byte load/stores, which leads to a pipeline stall
573 // when we try to read the result with one 4-byte load.
574 func BenchmarkIssue18740(b *testing.B) {
575 benchmarks := []struct {
576 name string
577 nbyte int
578 f func([]byte) uint64
580 {"2byte", 2, func(buf []byte) uint64 { return uint64(binary.LittleEndian.Uint16(buf)) }},
581 {"4byte", 4, func(buf []byte) uint64 { return uint64(binary.LittleEndian.Uint32(buf)) }},
582 {"8byte", 8, func(buf []byte) uint64 { return binary.LittleEndian.Uint64(buf) }},
585 var g [4096]byte
586 for _, bm := range benchmarks {
587 buf := make([]byte, bm.nbyte)
588 b.Run(bm.name, func(b *testing.B) {
589 for j := 0; j < b.N; j++ {
590 for i := 0; i < 4096; i += bm.nbyte {
591 copy(buf[:], g[i:])
592 sink += bm.f(buf[:])