1 /* Copy SIZE bytes from SRC to DEST.
3 Copyright (C) 1996-2015 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
5 Contributed by David S. Miller <davem@caip.rutgers.edu>,
6 Eddie C. Dost <ecd@skynet.be> and
7 Jakub Jelinek <jj@ultra.linux.cz>.
9 The GNU C Library is free software; you can redistribute it and/or
10 modify it under the terms of the GNU Lesser General Public
11 License as published by the Free Software Foundation; either
12 version 2.1 of the License, or (at your option) any later version.
14 The GNU C Library is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 Lesser General Public License for more details.
19 You should have received a copy of the GNU Lesser General Public
20 License along with the GNU C Library; if not, see
21 <http://www.gnu.org/licenses/>. */
25 /* Both these macros have to start with exactly the same insn */
/* MOVE_BIGCHUNK(src, dst, offset, t0..t7):
   copy 32 bytes from %src+offset to %dst+offset.
   Four ldd's fill the even/odd register pairs t0/t1 .. t6/t7
   (each ldd loads a doubleword into a register pair, low-address
   word in the even register); the data is then written back with
   eight single-word st's, so the destination only needs word
   alignment.  Per the comment above, this macro must begin with
   exactly the same instruction as MOVE_BIGALIGNCHUNK (the code
   presumably branches between them after the first insn issues —
   confirm against the dispatch code).  Keep the instruction count
   fixed: expansions of these macros are jump-table targets.  */
26 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
27 ldd [%src + offset + 0x00], %t0; \
28 ldd [%src + offset + 0x08], %t2; \
29 ldd [%src + offset + 0x10], %t4; \
30 ldd [%src + offset + 0x18], %t6; \
31 st %t0, [%dst + offset + 0x00]; \
32 st %t1, [%dst + offset + 0x04]; \
33 st %t2, [%dst + offset + 0x08]; \
34 st %t3, [%dst + offset + 0x0c]; \
35 st %t4, [%dst + offset + 0x10]; \
36 st %t5, [%dst + offset + 0x14]; \
37 st %t6, [%dst + offset + 0x18]; \
38 st %t7, [%dst + offset + 0x1c];
/* MOVE_BIGALIGNCHUNK(src, dst, offset, t0..t7):
   copy 32 bytes from %src+offset to %dst+offset, like
   MOVE_BIGCHUNK but writing with four doubleword std's instead of
   eight word st's — usable only when %dst+offset is 8-byte aligned
   (SPARC std traps on misaligned addresses).  Starts with the same
   ldd as MOVE_BIGCHUNK (required by the comment above the macros)
   and must keep a fixed expansion size.  */
40 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
41 ldd [%src + offset + 0x00], %t0; \
42 ldd [%src + offset + 0x08], %t2; \
43 ldd [%src + offset + 0x10], %t4; \
44 ldd [%src + offset + 0x18], %t6; \
45 std %t0, [%dst + offset + 0x00]; \
46 std %t2, [%dst + offset + 0x08]; \
47 std %t4, [%dst + offset + 0x10]; \
48 std %t6, [%dst + offset + 0x18];
/* MOVE_LASTCHUNK(src, dst, offset, t0..t3):
   copy 16 bytes ending just below %src-offset (i.e. the range
   [%src-offset-0x10, %src-offset)) using two ldd's and four word
   st's.  The negative offsets let a tail loop work backwards from
   already-advanced pointers.  Expansions of this macro form a
   computed-jump table (see the jmpl into label 80f below), so the
   instruction count per expansion must stay fixed.  */
50 #define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
51 ldd [%src - offset - 0x10], %t0; \
52 ldd [%src - offset - 0x08], %t2; \
53 st %t0, [%dst - offset - 0x10]; \
54 st %t1, [%dst - offset - 0x0c]; \
55 st %t2, [%dst - offset - 0x08]; \
56 st %t3, [%dst - offset - 0x04];
/* MOVE_LASTALIGNCHUNK(src, dst, offset, t0..t3):
   doubleword-store variant of MOVE_LASTCHUNK: copies the same 16
   bytes [%src-offset-0x10, %src-offset) with two ldd/std pairs.
   Requires 8-byte-aligned addresses for std.  Also used as a
   fixed-size jump-table entry (jmpl into 84f below), so do not
   change its expansion size.  */
58 #define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
59 ldd [%src - offset - 0x10], %t0; \
60 ldd [%src - offset - 0x08], %t2; \
61 std %t0, [%dst - offset - 0x10]; \
62 std %t2, [%dst - offset - 0x08];
/* MOVE_SHORTCHUNK(src, dst, offset, t0, t1):
   copy 2 bytes ending just below %src-offset, one byte at a time
   (no alignment requirement).  Used as a fixed-size jump-table
   entry for small tails (jmpl into 89f below): each expansion
   handles two more bytes, so keep the four-insn size fixed.  */
64 #define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
65 ldub [%src - offset - 0x02], %t0; \
66 ldub [%src - offset - 0x01], %t1; \
67 stb %t0, [%dst - offset - 0x02]; \
68 stb %t1, [%dst - offset - 0x01];
/* SMOVE_CHUNK(src, dst, offset, t0..t6, prev, shil, shir, offset2):
   shift-and-merge copy of a 16-byte chunk for a source that is not
   word-aligned: loads aligned doublewords from %src+offset, then
   (in lines elided from this chunk view) presumably combines
   adjacent words with sll-by-shil / srl-by-shir pairs through the
   carry register %prev before storing with std at the offset2-biased
   destination addresses.
   NOTE(review): original lines 73-76 and 78-82 of this macro body
   are elided here — the visible lines are only a fragment, and the
   final line still ends in a continuation.  Do not edit this macro
   from this view; confirm against the full file.  */
70 #define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
71 ldd [%src + offset + 0x00], %t0; \
72 ldd [%src + offset + 0x08], %t2; \
77 sll %t1, shil, %prev; \
83 std %t4, [%dst + offset + offset2 - 0x04]; \
84 std %t0, [%dst + offset + offset2 + 0x04]; \
85 sll %t3, shil, %prev; \
/* SMOVE_ALIGNCHUNK(src, dst, offset, t0..t6, prev, shil, shir, offset2):
   variant of SMOVE_CHUNK whose final std's land on unbiased
   (+0x00/+0x08) destination offsets — presumably the case where the
   shifted output is doubleword-aligned at the destination; confirm
   against the elided dispatch code.  Merges words via sll-by-shil
   into %prev and an or into %t2 (visible below); srl-by-shir lines
   are among those elided.
   NOTE(review): original lines 91-94, 96-99 and 102 of this macro
   body are elided from this chunk view — treat the body as a
   fragment and do not edit it from here.  */
88 #define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
89 ldd [%src + offset + 0x00], %t0; \
90 ldd [%src + offset + 0x08], %t2; \
95 sll %t1, shil, %prev; \
100 or %t4, %prev, %t2; \
101 sll %t3, shil, %prev; \
103 std %t0, [%dst + offset + offset2 + 0x00]; \
104 std %t2, [%dst + offset + offset2 + 0x08];
/* memcpy: copy %o2 bytes from %o1 (src) to %o0 (dst), result in %o0.
   NOTE(review): this chunk is a sampled subset of the routine — the
   original lines elided between the visible ones carry the branch,
   delay-slot and pointer-advance scaffolding.  Comments below
   describe only the visible instructions; anything inferred about
   the elided control flow is marked as an assumption.  */
114 ENTRY(memcpy) /* %o0=dst %o1=src %o2=len */
/* Test bit 2 of src (word- but not doubleword-aligned address);
   presumably gates a one-word pre-copy before the aligned loop —
   confirm against the elided lines.  */
144 77: andcc %o1, 4, %g0
/* Isolate the multiple-of-128 part of the count in %g6 (the elided
   code presumably loops on it).  */
153 2: andcc %g1, 0xffffff80, %g6
/* Word-store main loop body: 4 x 32 = 128 bytes per iteration,
   destination needs only word alignment.  */
158 5: MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
159 MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
160 MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
161 MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
/* Remaining 0x10..0x70 bytes, dispatched by the computed jump.  */
166 3: andcc %g1, 0x70, %g6
/* Computed jump into the MOVE_LASTCHUNK table below: the target is
   %o5 plus a code-size delta, so every MOVE_LASTCHUNK expansion
   must keep a fixed instruction count.  */
176 jmpl %o5 + (80f - 104b), %g0
/* Jump table of 16-byte copies at descending offsets; entering at
   the right row copies exactly the bytes that remain.  */
179 79: MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
180 MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
181 MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
182 MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
183 MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
184 MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
185 MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
/* Doubleword-aligned main loop body: same 128 bytes per iteration
   but with 8-byte std stores.  */
219 MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
220 MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
221 MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
222 MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
/* Computed jump into the aligned-tail table (fixed-size entries).  */
236 jmpl %o5 + (84f - 111b), %g0
239 83: MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
240 MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
241 MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
242 MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
243 MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
244 MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
245 MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
/* Store one trailing doubleword just below the (already advanced)
   destination pointer.  */
252 std %g2, [%o0 - 0x08]
/* Unaligned-source path: load the word at src+12 (fourth word of a
   16-byte group) into %g1.  */
356 9: ld [%o1 + 12], %g1
/* Shift a previously loaded word left by the misalignment-derived
   count in %g4 — one half of a shift-and-merge step.  */
366 10: sll %o5, %g4, %g2
/* Test the low two bits of src to select one of the shift-merge
   variants below.  */
389 87: andcc %o1, 3, %g0
/* Round the count down to a multiple of 64 for the SMOVE loop.  */
438 and %o2, 0xffffffc0, %o3
/* Shift-merge loop, shil=8/shir=24, offset2=-3: presumably the
   src&3 == 1 case (one byte into a word) — confirm against the
   elided dispatch.  64 bytes per pass.  */
440 4: SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
441 SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
442 SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
443 SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
/* Single 16-byte tail chunk for the same shift pair.  */
452 4: SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
/* Same structure for shil=16/shir=16 (halfword misalignment,
   src&3 == 2 presumably).  */
473 and %o2, 0xffffffc0, %o3
475 4: SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
476 SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
477 SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
478 SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
487 4: SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
/* Same structure for shil=24/shir=8 (src&3 == 3 presumably).  */
508 and %o2, 0xffffffc0, %o3
512 4: SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
513 SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
514 SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
515 SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
524 4: SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
/* Aligned-destination shift-merge loops (SMOVE_ALIGNCHUNK), same
   three shift pairs; each followed by its own 16-byte tail and a
   small fix-up store for the bytes the shift bias leaves over.  */
534 41: SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
535 SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
536 SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
537 SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
546 4: SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
/* Tail halfword store; the -3 offset matches this path's offset2
   bias (dst presumably pre-advanced — confirm in elided code).  */
553 1: sth %g2, [%o0 - 3]
557 43: SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
558 SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
559 SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
560 SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
569 4: SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
/* Tail byte store for the +3-biased path.  */
576 1: stb %g2, [%o0 + 3]
579 42: SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
580 SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
581 SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
582 SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
591 4: SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
/* Tail halfword store for the -2-biased path.  */
598 1: sth %g2, [%o0 - 2]
/* Short-copy path: isolate the even part (0..14) of the remaining
   count for the two-bytes-per-entry table below; an elided stb
   presumably handles the final odd byte.  */
602 88: and %o2, 0xe, %o3
/* Computed jump into the MOVE_SHORTCHUNK table (fixed-size
   four-insn entries).  */
609 jmpl %o5 + (89f - 106b), %g0
612 MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
613 MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
614 MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
615 MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
616 MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
617 MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
618 MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
/* Internal-linkage aliases: let libc call memcpy/mempcpy without
   going through the PLT, and export mempcpy as a weak alias of
   __mempcpy (whose entry point is elided from this chunk view).  */
649 libc_hidden_builtin_def (memcpy)
651 libc_hidden_def (__mempcpy)
652 weak_alias (__mempcpy, mempcpy)
653 libc_hidden_builtin_def (mempcpy)