/* memcpy.S: Sparc optimized memcpy and memmove code
 * Hand optimized from GNU libc's memcpy and memmove
 * Copyright (C) 1991,1996 Free Software Foundation
 * Copyright (C) 1995 Linus Torvalds (Linus.Torvalds@helsinki.fi)
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
/* In kernel these functions don't return a value.
 * One should use macros in asm/string.h for that purpose.
 * We return 0, so that bugs are more apparent.
 */

#define RETL_INSN	clr	%o0
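/* RETL_INSN is meant for the delay slot of the final retl of each routine:
 * the clr executes on the way out, so a caller that wrongly consumes a
 * memcpy-style return value sees 0 instead of a stale pointer and the bug
 * surfaces quickly.
 */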
/* Both these macros have to start with exactly the same insn */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	ldd	[%src + (offset) + 0x10], %t4; \
	ldd	[%src + (offset) + 0x18], %t6; \
	st	%t0, [%dst + (offset) + 0x00]; \
	st	%t1, [%dst + (offset) + 0x04]; \
	st	%t2, [%dst + (offset) + 0x08]; \
	st	%t3, [%dst + (offset) + 0x0c]; \
	st	%t4, [%dst + (offset) + 0x10]; \
	st	%t5, [%dst + (offset) + 0x14]; \
	st	%t6, [%dst + (offset) + 0x18]; \
	st	%t7, [%dst + (offset) + 0x1c];
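/* MOVE_BIGCHUNK moves 32 bytes starting (offset) past %src/%dst.  Each ldd
 * fills an even/odd register pair (t0/t1, t2/t3, ...), so the t arguments
 * must name the even register of each pair and the source address must be
 * doubleword aligned; the word-sized st writeback only needs %dst word
 * aligned.  Roughly, as an illustrative C sketch:
 *
 *	u32 *s = (u32 *)(src + offset), *d = (u32 *)(dst + offset);
 *	for (int i = 0; i < 8; i++)
 *		d[i] = s[i];
 */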
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	ldd	[%src + (offset) + 0x10], %t4; \
	ldd	[%src + (offset) + 0x18], %t6; \
	std	%t0, [%dst + (offset) + 0x00]; \
	std	%t2, [%dst + (offset) + 0x08]; \
	std	%t4, [%dst + (offset) + 0x10]; \
	std	%t6, [%dst + (offset) + 0x18];
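/* Same 32-byte move, but the writeback uses std, halving the store count.
 * That is only legal when the destination is doubleword aligned as well,
 * which is why the copy code below picks between the two variants at run
 * time.
 */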
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - (offset) - 0x10], %t0; \
	ldd	[%src - (offset) - 0x08], %t2; \
	st	%t0, [%dst - (offset) - 0x10]; \
	st	%t1, [%dst - (offset) - 0x0c]; \
	st	%t2, [%dst - (offset) - 0x08]; \
	st	%t3, [%dst - (offset) - 0x04];

#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - (offset) - 0x10], %t0; \
	ldd	[%src - (offset) - 0x08], %t2; \
	std	%t0, [%dst - (offset) - 0x10]; \
	std	%t2, [%dst - (offset) - 0x08];

#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
	ldub	[%src - (offset) - 0x02], %t0; \
	ldub	[%src - (offset) - 0x01], %t1; \
	stb	%t0, [%dst - (offset) - 0x02]; \
	stb	%t1, [%dst - (offset) - 0x01];
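/* The LASTCHUNK/SHORTCHUNK macros clean up the residue once the pointers
 * have already been advanced past the region: they address backwards from
 * %src/%dst (16 bytes, or a byte pair, per invocation) and are laid out in
 * descending-offset runs below, so a computed jump can enter the run
 * partway and execute exactly as many copies as the tail length requires.
 */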
/* Both these macros have to start with exactly the same insn */
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src - (offset) - 0x20], %t0; \
	ldd	[%src - (offset) - 0x18], %t2; \
	ldd	[%src - (offset) - 0x10], %t4; \
	ldd	[%src - (offset) - 0x08], %t6; \
	st	%t0, [%dst - (offset) - 0x20]; \
	st	%t1, [%dst - (offset) - 0x1c]; \
	st	%t2, [%dst - (offset) - 0x18]; \
	st	%t3, [%dst - (offset) - 0x14]; \
	st	%t4, [%dst - (offset) - 0x10]; \
	st	%t5, [%dst - (offset) - 0x0c]; \
	st	%t6, [%dst - (offset) - 0x08]; \
	st	%t7, [%dst - (offset) - 0x04];

#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src - (offset) - 0x20], %t0; \
	ldd	[%src - (offset) - 0x18], %t2; \
	ldd	[%src - (offset) - 0x10], %t4; \
	ldd	[%src - (offset) - 0x08], %t6; \
	std	%t0, [%dst - (offset) - 0x20]; \
	std	%t2, [%dst - (offset) - 0x18]; \
	std	%t4, [%dst - (offset) - 0x10]; \
	std	%t6, [%dst - (offset) - 0x08];

#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	st	%t0, [%dst + (offset) + 0x00]; \
	st	%t1, [%dst + (offset) + 0x04]; \
	st	%t2, [%dst + (offset) + 0x08]; \
	st	%t3, [%dst + (offset) + 0x0c];

#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
	ldub	[%src + (offset) + 0x00], %t0; \
	ldub	[%src + (offset) + 0x01], %t1; \
	stb	%t0, [%dst + (offset) + 0x00]; \
	stb	%t1, [%dst + (offset) + 0x01];
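/* The RMOVE_* family mirrors the macros above for the reverse (memmove)
 * path: when dst overlaps src from above, the copy must proceed from the
 * high addresses down so no source byte is overwritten before it is read.
 * Illustrative C sketch of the idea:
 *
 *	while (n--)
 *		dst[n] = src[n];
 */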
#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	srl	%t0, shir, %t5; \
	srl	%t1, shir, %t6; \
	sll	%t0, shil, %t0; \
	or	%t5, %prev, %t5; \
	sll	%t1, shil, %prev; \
	or	%t6, %t0, %t0; \
	srl	%t2, shir, %t1; \
	srl	%t3, shir, %t6; \
	sll	%t2, shil, %t2; \
	or	%t1, %prev, %t1; \
	std	%t4, [%dst + (offset) + (offset2) - 0x04]; \
	std	%t0, [%dst + (offset) + (offset2) + 0x04]; \
	sll	%t3, shil, %prev; \
	or	%t6, %t2, %t3;
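/* SMOVE_CHUNK handles src/dst pairs whose offsets within a word differ:
 * each destination word is stitched together from two neighbouring source
 * words, with %prev carrying the leftover bits into the next round
 * (shil + shir == 32).  Illustrative C sketch of the merge, with the
 * seeding of prev simplified:
 *
 *	u32 prev = in[0] << shil;
 *	for (i = 0; i < n; i++) {
 *		out[i] = prev | (in[i + 1] >> shir);
 *		prev   = in[i + 1] << shil;
 *	}
 */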
#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	srl	%t0, shir, %t4; \
	srl	%t1, shir, %t5; \
	sll	%t0, shil, %t6; \
	or	%t4, %prev, %t0; \
	sll	%t1, shil, %prev; \
	or	%t5, %t6, %t1; \
	srl	%t2, shir, %t4; \
	srl	%t3, shir, %t5; \
	sll	%t2, shil, %t6; \
	or	%t4, %prev, %t2; \
	sll	%t3, shil, %prev; \
	or	%t5, %t6, %t3; \
	std	%t0, [%dst + (offset) + (offset2) + 0x00]; \
	std	%t2, [%dst + (offset) + (offset2) + 0x08];
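/* Same shift-and-merge as SMOVE_CHUNK, but both result pairs go out as
 * naturally aligned std, for the case where the destination doubleword
 * boundary lines up (offset2 selects the store bias in both variants).
 */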
	nop				! Only bcopy returns here and it returns void...
1:	/* reverse_bytes */

	/* NOTE: This code is executed just for the cases
	         where %src (=%o1) & 3 is != 0.
		 We need to align it to 4. So, for (%src & 3)
		 1 we need to do ldub,lduh
		 2 lduh
		 3 just ldub
		 so even if it looks weird, the branches
		 are correct here. -jj
	 */
78:	/* dword_align */

FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
	andcc	%g1, 0xffffff80, %g0

	MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
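/* Main forward loop: four back-to-back 32-byte chunks move 0x80 bytes per
 * pass.  That is why the andcc above masks the remaining length with
 * 0xffffff80; a zero result means no full 128-byte block is left.
 */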
	jmpl	%o5 + %lo(80f), %g0

79:	/* memcpy_table */

	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

80:	/* memcpy_table_end */
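/* The jmpl above enters this run partway: each MOVE_LASTCHUNK expands to a
 * fixed number of instructions, so the entry point is just a multiple of
 * the residual 16-byte chunk count back from 80:.  Duff's-device style,
 * only the copies actually needed execute before control falls through.
 * Illustrative C sketch (copy16 is a hypothetical stand-in for one
 * MOVE_LASTCHUNK):
 *
 *	switch ((len & 0x70) >> 4) {
 *	case 7: copy16(); case 6: copy16(); case 5: copy16();
 *	case 4: copy16(); case 3: copy16(); case 2: copy16();
 *	case 1: copy16();
 *	}
 */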
81:	/* memcpy_last7 */

	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
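/* Doubleword-aligned twin of the 128-byte loop above: same structure, but
 * built from MOVE_BIGALIGNCHUNK so the stores go out as std pairs.
 */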
	jmpl	%o5 + %lo(84f), %g0

83:	/* amemcpy_table */

	MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

84:	/* amemcpy_table_end */
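/* Same computed-jump trick as the 79: table, with std-based chunks for the
 * aligned path.
 */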
	std	%g2, [%o0 - 0x08]

85:	/* amemcpy_last7 */

86:	/* non_aligned */

	restore %g0, %g0, %o0
	jmpl	%o5 + %lo(89f), %g0

	MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
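/* Byte-pair cleanup for the non-aligned path: tails shorter than 16 bytes
 * are finished here, again by jumping into the run at the right depth; an
 * odd trailing byte, if any, is picked up after the table.
 */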
89:	/* short_table_end */

90:	/* short_aligned_end */