2 * "memcpy" implementation of SuperH
4 * Copyright (C) 1999 Niibe Yutaka
5 * Copyright (c) 2002 STMicroelectronics Ltd
6 * Modified from memcpy.S and micro-optimised for SH4
7 * Stuart Menefy (stuart.menefy@st.com)
10 #include <linux/linkage.h>
13 * void *memcpy(void *dst, const void *src, size_t n);
15 * It is assumed that there is no overlap between src and dst.
16 * If there is an overlap, then the results are undefined.
20 ! GHIJ KLMN OPQR --> ...G HIJK LMNO PQR.
23 ! Size is 16 or greater, and may have trailing bytes
27 ! Read a long word and write a long word at once
28 ! At the start of each iteration, r7 contains last long load
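	!
	! A minimal C sketch of the byte-stitching these loops perform (little-endian
	! case shown, assuming the source lies one byte past a long word boundary;
	! the helper name is illustrative and the exact shift counts depend on the
	! offset and on endianness):
	!
	!	#include <stddef.h>
	!	#include <stdint.h>
	!
	!	static void copy_le_off_by_1(uint32_t *dst, const uint32_t *src_aligned,
	!				     size_t n)
	!	{
	!		uint32_t prev = src_aligned[0];		/* priming load, like r7 */
	!		for (size_t i = 0; i < n; i++) {
	!			/* note: reads src_aligned[n], one word past the output */
	!			uint32_t next = src_aligned[i + 1];
	!			/* low three bytes from 'prev', top byte from 'next' */
	!			dst[i] = (prev >> 8) | (next << 24);
	!			prev = next;
	!		}
	!	}
	!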
	mov	r4,r2		! 5 MT (0 cycles latency)
	mov.l	@(r0,r5),r7	! 21 LS (2 cycles latency)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	! 6 cycles, 4 bytes per iteration
3:	mov.l	@(r0,r5),r1	! 21 LS (latency=2)	! NMLK
	mov	r7, r3		! 5 MT (latency=0)	! RQPO
	mov	r1,r6		! 5 MT (latency=0)
	shll8	r3		! 102 EX		! Oxxx
	shlr8	r6		! 106 EX		! xNML
	mov	r1, r7		! 5 MT (latency=0)
	or	r6,r3		! 82 EX			! ONML
3:	mov.l	@(r0,r5),r1	! 21 LS (latency=2)	! KLMN
	mov	r7,r3		! 5 MT (latency=0)	! OPQR
	shlr8	r3		! 106 EX		! xxxO
	mov	r1,r6		! 5 MT (latency=0)
	shll8	r6		! 102 EX		! LMNx
	mov	r1,r7		! 5 MT (latency=0)
	or	r6,r3		! 82 EX			! LMNO
	! Finally, copy any remaining bytes one at a time, if necessary
8:	cmp/hi	r2,r0		! 57 MT
	mov.b	@(r0,r5),r1	! 20 LS (latency=2)
	! GHIJ KLMN OPQR --> .GHI JKLM NOPQ R...
	! Size is 16 or greater, and may have trailing bytes
	! Read a long word and write a long word at once
	! At the start of each iteration, r7 contains last long load
	mov	r4,r2		! 5 MT (0 cycles latency)
	mov.l	@(r0,r5),r7	! 21 LS (2 cycles latency)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	! 6 cycles, 4 bytes per iteration
3:	mov.l	@(r0,r5),r1	! 21 LS (latency=2)	! NMLK
	mov	r7, r3		! 5 MT (latency=0)	! RQPO
	shll8	r3		! 102 EX		! QPOx
	mov	r1,r6		! 5 MT (latency=0)
	shlr8	r6		! 106 EX		! xxxN
	mov	r1, r7		! 5 MT (latency=0)
	or	r6,r3		! 82 EX			! QPON
	mov.l	r3,@-r0		! 30 LS
	mov.l	@(r0,r5),r1	! KLMN
	! Finally, copy any remaining bytes one at a time, if necessary
8:	cmp/hi	r2,r0		! 57 MT
	mov.b	@(r0,r5),r1	! 20 LS (latency=2)
	mov.b	r1,@-r0		! 29 LS
	! Calculate the invariants which will be used in the remainder of the code:
	!	r4	--> start of DST
	!	r0	--> current position in DST;  r0+r5 --> corresponding position in SRC
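	!
	! A minimal C sketch of this invariant (illustrative names, not the kernel
	! interface): keep one cursor at the end of DST plus the constant difference
	! SRC - DST, and copy backwards.  A "mov.b @(r0,r5),r1 / mov.b r1,@-r0" pair
	! below corresponds to one trip round this loop; the assembly simply biases
	! r5 by the element size so the indexed load hits the byte about to be stored:
	!
	!	#include <stddef.h>
	!
	!	static void copy_backwards(unsigned char *dst, const unsigned char *src,
	!				   size_t n)
	!	{
	!		unsigned char *cursor = dst + n;	/* r0 */
	!		ptrdiff_t delta = src - dst;		/* r5 */
	!
	!		while (cursor != dst) {			/* compare against r4/r2 */
	!			cursor--;
	!			*cursor = cursor[delta];
	!		}
	!	}
	!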
	! Short circuit the common case of src, dst and len being 32 bit aligned
	! and test for zero length move
	mov	r6, r0		! 5 MT (0 cycle latency)
	bt/s	99f		! 111 BR (zero len)
	mov	r4, r0		! 5 MT (0 cycle latency)
	bt/s	.Lcase00	! 111 BR (aligned)
	! Arguments are not nicely long word aligned or zero len.
	! Check for small copies, and if so do a simple byte at a time copy.
	! Deciding on an exact value of 'small' is not easy, as the point at which
	! using the optimised routines becomes worthwhile varies (these are the
	! cycle counts for different sizes, byte-at-a-time vs. the optimised copy
	! with long word / word / byte relative alignment):
	!	size	byte-at-a-time	long	word	byte
	!	16	42		39-40	46-50	50-55
	!	24	58		43-44	54-58	62-67
	!	36	82		49-50	66-70	80-85
	! However the penalty for getting it 'wrong' is much higher for long word
	! aligned data (and this is more common), so use a value of 16.
	bf/s	6f		! 108 BR (not small)
	mov	r5, r3		! 5 MT (latency=0)
	mov.b	@(r0,r5),r1	! 20 LS (latency=2)
	mov.b	r1,@-r0		! 29 LS
	! 4 cycles, 2 bytes per iteration
3:	mov.b	@(r0,r5),r1	! 20 LS (latency=2)
4:	mov.b	@(r0,r3),r2	! 20 LS (latency=2)
	mov.b	r1,@-r0		! 29 LS
	mov.b	r2,@-r0		! 29 LS
	! Size is not small, so it's worthwhile looking for optimisations.
	! First align destination to a long word boundary.
	! r5 = normal value - 1
6:	tst	#3, r0		! 87 MT
	! 3 cycles, 1 byte per iteration
	mov.b	@(r0,r5),r1	! 19 LS (latency=2)
	mov.b	r1,@-r0		! 28 LS
2:	add	#1, r5		! 79 EX
	! Now select the appropriate bulk transfer code based on relative
	! alignment of src and dst.
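	!
	! Sketched in C, the dispatch amounts to switching on the source's residual
	! alignment once the destination is long word aligned (the enum and helper
	! name are illustrative only):
	!
	!	#include <stdint.h>
	!
	!	enum strategy { LONG_COPY, WORD_XTRCT, BYTE_SHIFT };
	!
	!	static enum strategy pick(const void *src)
	!	{
	!		switch ((uintptr_t)src & 3) {
	!		case 0:  return LONG_COPY;	/* .Lcase00 below */
	!		case 2:  return WORD_XTRCT;	/* xtrct-based loops */
	!		default: return BYTE_SHIFT;	/* shll8/shlr8-based loops */
	!		}
	!	}
	!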
	mov	r0, r3		! 5 MT (latency=0)
	mov	r5, r0		! 5 MT (latency=0)
	cmp/ge	r7, r6		! 55 MT
1:	tst	#2, r0		! 87 MT
	! GHIJ KLMN OPQR --> GHIJ KLMN OPQR
	! src, dst and size are all long word aligned
	mov	r5, r3		! 5 MT (latency=0)
	cmp/gt	r6, r1		! 56 MT
	bf	.Lcase00b	! 108 BR (big loop)
	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
	mov.l	r1,@-r0		! 30 LS
	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
4:	mov.l	@(r0, r3), r2	! 21 LS (latency=2)
	mov.l	r1, @-r0	! 30 LS
	mov.l	r2, @-r0	! 30 LS
	! Size is 16 or greater and less than 64, but may have trailing bytes
	mov	r4, r7		! 5 MT (latency=0)
	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
	mov	r5, r3		! 5 MT (latency=0)
	mov.l	r1,@-r0		! 30 LS
	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
4:	mov.l	@(r0, r3), r2	! 21 LS (latency=2)
	mov.l	r1, @-r0	! 30 LS
	mov.l	r2, @-r0	! 30 LS
	! Copy the final 0-3 bytes
	cmp/eq	r0, r4		! 54 MT
	! 3 cycles, 1 byte per iteration
1:	mov.b	@(r0,r5),r1	! 19 LS
	mov.b	r1,@-r0		! 28 LS
	! Size is at least 64 bytes, so will be going round the big loop at least once.
	! r3 = rounded down r0
	mov	r0, r3		! 5 MT (latency=0)
	mov	#(~0x1f), r1	! 6 EX
	mov	r4, r2		! 5 MT (latency=0)
	cmp/eq	r3, r0		! 54 MT
	add	#0x1f, r2	! 50 EX
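	!
	! A sketch of the bounds being set up here, assuming 32-byte cache lines
	! (the names stand in for r4 and r0): r3 becomes the destination end rounded
	! down to a line and r2 the destination start rounded up, so the region
	! between them can be copied a whole line per iteration:
	!
	!	#include <stdint.h>
	!
	!	static void line_bounds(const void *dst, const void *dst_end,
	!				uintptr_t *down, uintptr_t *up)
	!	{
	!		*down = (uintptr_t)dst_end & ~(uintptr_t)0x1f;		/* r3 */
	!		*up   = ((uintptr_t)dst + 0x1f) & ~(uintptr_t)0x1f;	/* r2 */
	!	}
	!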
	! copy initial words until cache line aligned
	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
	mov	r5, r6		! 5 MT (latency=0)
	tst	#0x18, r0	! 87 MT
	mov.l	r1,@-r0		! 30 LS
	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
4:	mov.l	@(r0, r6), r7	! 21 LS (latency=2)
	cmp/eq	r3, r0		! 54 MT
	mov.l	r1, @-r0	! 30 LS
	mov.l	r7, @-r0	! 30 LS
	! Copy the cache line aligned blocks
	! In use: r0, r2, r4, r5
	! Scratch: r1, r3, r6, r7
	! We could do this with the four scratch registers, but if src
	! and dest hit the same cache line, this will thrash, so make
	! use of additional registers.
	! We also need r0 as a temporary (for movca), so 'undo' the invariant:
	!	r5: src (was r0+r5)
	! this can be reversed at the end, so we don't need to save any extra registers.
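	!
	! The shape of the cache line loop below, sketched in C (illustrative only;
	! the real loop runs backwards and uses movca.l so each destination line is
	! allocated in the cache without first being fetched from memory).  All
	! eight long words are read before any are written, so loads and stores to
	! lines that alias in the cache are not interleaved:
	!
	!	#include <stddef.h>
	!	#include <stdint.h>
	!
	!	static void copy_lines(uint32_t *dst, const uint32_t *src, size_t nlines)
	!	{
	!		while (nlines--) {
	!			uint32_t a = src[0], b = src[1], c = src[2], d = src[3];
	!			uint32_t e = src[4], f = src[5], g = src[6], h = src[7];
	!
	!			dst[0] = a; dst[1] = b; dst[2] = c; dst[3] = d;
	!			dst[4] = e; dst[5] = f; dst[6] = g; dst[7] = h;
	!			src += 8;
	!			dst += 8;
	!		}
	!	}
	!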
1:	mov.l	r8, @-r15	! 30 LS
	mov.l	r9, @-r15	! 30 LS
	mov	r0, r1		! 5 MT (latency=0)
	mov.l	r10, @-r15	! 30 LS
	add	#-0x1c, r5	! 50 EX
	mov.l	r11, @-r15	! 30 LS
	! 16 cycles, 32 bytes per iteration
2:	mov.l	@(0x00,r5),r0	! 18 LS (latency=2)
	add	#-0x20, r1	! 50 EX
	mov.l	@(0x04,r5),r3	! 18 LS (latency=2)
	mov.l	@(0x08,r5),r6	! 18 LS (latency=2)
	mov.l	@(0x0c,r5),r7	! 18 LS (latency=2)
	mov.l	@(0x10,r5),r8	! 18 LS (latency=2)
	mov.l	@(0x14,r5),r9	! 18 LS (latency=2)
	mov.l	@(0x18,r5),r10	! 18 LS (latency=2)
	mov.l	@(0x1c,r5),r11	! 18 LS (latency=2)
	movca.l	r0,@r1		! 40 LS (latency=3-7)
	mov.l	r3,@(0x04,r1)	! 33 LS
	mov.l	r6,@(0x08,r1)	! 33 LS
	mov.l	r7,@(0x0c,r1)	! 33 LS
	mov.l	r8,@(0x10,r1)	! 33 LS
	add	#-0x20, r5	! 50 EX
	mov.l	r9,@(0x14,r1)	! 33 LS
	mov.l	r10,@(0x18,r1)	! 33 LS
	mov.l	r11,@(0x1c,r1)	! 33 LS
	mov	r1, r0		! 5 MT (latency=0)
	mov.l	@r15+, r11	! 15 LS
	mov.l	@r15+, r10	! 15 LS
	cmp/eq	r4, r0		! 54 MT
	mov.l	@r15+, r9	! 15 LS
1:	mov.l	@r15+, r8	! 15 LS
	sub	r4, r1		! 75 EX (len remaining)
	! number of trailing bytes is non-zero
	! invariants restored (r5 already decremented by 4)
	! also r1 = num bytes remaining
	mov	r4, r7		! 5 MT (latency=0)
	add	#0x1c, r5	! 50 EX (back to -4)
	cmp/hs	r2, r1		! 58 MT
	mov.l	@(r0, r5), r6	! 21 LS (latency=2)
	mov	r5, r3		! 5 MT (latency=0)
	cmp/hs	r2, r1		! 58 MT
	mov.l	r6,@-r0		! 30 LS
	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r6	! 21 LS (latency=2)
4:	mov.l	@(r0, r3), r2	! 21 LS (latency=2)
	mov.l	r6, @-r0	! 30 LS
	mov.l	r2, @-r0	! 30 LS
	! Copy the final 0-3 bytes
5:	cmp/eq	r0, r4		! 54 MT
	! 3 cycles, 1 byte per iteration
1:	mov.b	@(r0,r5),r1	! 19 LS
	mov.b	r1,@-r0		! 28 LS
	! GHIJ KLMN OPQR --> ..GH IJKL MNOP QR..
	! Size is 16 or greater and less than 64, but may have trailing bytes
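	!
	! A minimal C sketch of what the xtrct-based loops further down achieve
	! (little-endian, assuming the source lies two bytes past a long word
	! boundary; the helper name is illustrative): each destination long word is
	! the middle 32 bits of two adjacent aligned source words:
	!
	!	#include <stddef.h>
	!	#include <stdint.h>
	!
	!	static void copy_le_off_by_2(uint32_t *dst, const uint32_t *src_aligned,
	!				     size_t n)
	!	{
	!		for (size_t i = 0; i < n; i++)
	!			dst[i] = (src_aligned[i] >> 16) | (src_aligned[i + 1] << 16);
	!	}
	!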
2:	mov	r5, r6		! 5 MT (latency=0)
	mov	r4,r2		! 5 MT (latency=0)
3:	mov.w	@(r0,r5),r1	! 20 LS (latency=2)
	mov.w	@(r0,r6),r3	! 20 LS (latency=2)
	mov.w	r1,@-r0		! 29 LS
	mov.w	r3,@-r0		! 29 LS
	! Size is at least 64 bytes, so will be going round the big loop at least once.
	! r3 = rounded down r0
	mov	r0, r3		! 5 MT (latency=0)
	mov	#(~0x1f), r1	! 6 EX
	mov	r4, r2		! 5 MT (latency=0)
	cmp/eq	r3, r0		! 54 MT
	add	#0x1f, r2	! 50 EX
	! Copy one short word at a time until we are cache line aligned
	! Normal values: r0, r2, r3, r4
2:	mov.w	@(r0,r5),r1	! 20 LS (latency=2)
	mov.w	r1,@-r0		! 29 LS
	! Copy the cache line aligned blocks
	! In use: r0, r2, r4, r5 (=r5-2)
	! Scratch: r1, r3, r6, r7
	! We could do this with the four scratch registers, but if src
	! and dest hit the same cache line, this will thrash, so make
	! use of additional registers.
	! We also need r0 as a temporary (for movca), so 'undo' the invariant:
	!	r5: src (was r0+r5)
	! this can be reversed at the end, so we don't need to save any extra registers.
1:	mov.l	r8, @-r15	! 30 LS
	mov.l	r9, @-r15	! 30 LS
	mov	r0, r1		! 5 MT (latency=0)
	mov.l	r10, @-r15	! 30 LS
	add	#-0x1e, r5	! 50 EX
	mov.l	r11, @-r15	! 30 LS
	mov.l	r12, @-r15	! 30 LS
	! 17 cycles, 32 bytes per iteration
#ifdef CONFIG_CPU_LITTLE_ENDIAN
2:	mov.w	@r5+, r0	! 14 LS (latency=2)	! ..JI
	add	#-0x20, r1	! 50 EX
	mov.l	@r5+, r3	! 15 LS (latency=2)	! NMLK
	mov.l	@r5+, r6	! 15 LS (latency=2)	! RQPO
	shll16	r0		! 103 EX		! JI..
	mov.l	@r5+, r7	! 15 LS (latency=2)
	xtrct	r3, r0		! 48 EX			! LKJI
	mov.l	@r5+, r8	! 15 LS (latency=2)
	xtrct	r6, r3		! 48 EX			! PONM
	mov.l	@r5+, r9	! 15 LS (latency=2)
	mov.l	@r5+, r10	! 15 LS (latency=2)
	mov.l	@r5+, r11	! 15 LS (latency=2)
	mov.w	@r5+, r12	! 15 LS (latency=2)
	xtrct	r10, r9		! 48 EX
	movca.l	r0,@r1		! 40 LS (latency=3-7)
	xtrct	r11, r10	! 48 EX
	mov.l	r3, @(0x04,r1)	! 33 LS
	xtrct	r12, r11	! 48 EX
	mov.l	r6, @(0x08,r1)	! 33 LS
	mov.l	r7, @(0x0c,r1)	! 33 LS
	mov.l	r8, @(0x10,r1)	! 33 LS
	add	#-0x40, r5	! 50 EX
	mov.l	r9, @(0x14,r1)	! 33 LS
	mov.l	r10, @(0x18,r1)	! 33 LS
	mov.l	r11, @(0x1c,r1)	! 33 LS
2:	mov.w	@(0x1e,r5), r0	! 17 LS (latency=2)
	mov.l	@(0x1c,r5), r3	! 18 LS (latency=2)
	mov.l	@(0x18,r5), r6	! 18 LS (latency=2)
	mov.l	@(0x14,r5), r7	! 18 LS (latency=2)
	mov.l	@(0x10,r5), r8	! 18 LS (latency=2)
	mov.l	@(0x0c,r5), r9	! 18 LS (latency=2)
	mov.l	@(0x08,r5), r10	! 18 LS (latency=2)
	mov.l	@(0x04,r5), r11	! 18 LS (latency=2)
	mov.l	@(0x00,r5), r12	! 18 LS (latency=2)
	xtrct	r10, r9		! 48 EX
	movca.l	r0,@r1		! 40 LS (latency=3-7)
	add	#-0x1c, r1	! 50 EX
	mov.l	r3, @(0x1c,r1)	! 33 LS
	xtrct	r11, r10	! 48 EX
	mov.l	r6, @(0x18,r1)	! 33 LS
	xtrct	r12, r11	! 48 EX
	mov.l	r7, @(0x14,r1)	! 33 LS
	mov.l	r8, @(0x10,r1)	! 33 LS
	add	#-0x3e, r5	! 50 EX
	mov.l	r9, @(0x0c,r1)	! 33 LS
	mov.l	r10, @(0x08,r1)	! 33 LS
	mov.l	r11, @(0x04,r1)	! 33 LS
	mov	r1, r0		! 5 MT (latency=0)
	mov.l	@r15+, r11	! 15 LS
	mov.l	@r15+, r10	! 15 LS
	cmp/eq	r4, r0		! 54 MT
	mov.l	@r15+, r9	! 15 LS
1:	mov.l	@r15+, r8	! 15 LS
	add	#0x1e, r5	! 50 EX
	! Finish off a short word at a time
	! r5 must be invariant - 2
10:	mov	r4,r2		! 5 MT (latency=0)
	cmp/hi	r2, r0		! 57 MT
3:	mov.w	@(r0,r5),r1	! 20 LS
	mov.w	r1,@-r0		! 29 LS
	! Finally, copy the last byte if necessary