/* Copyright (C) 2012-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library. If not, see
   <http://www.gnu.org/licenses/>.  */
#ifdef ANDROID_CHANGES
# include "machine/asm.h"
# include "machine/regdef.h"
# define USE_MEMMOVE_FOR_OVERLAP
# define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD_STREAMED
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
# define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD_STREAMED
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _COMPILING_NEWLIB
# include "machine/asm.h"
# include "machine/regdef.h"
# define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD_STREAMED
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#if (_MIPS_ISA == _MIPS_ISA_MIPS4) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
# ifndef DISABLE_PREFETCH

#if defined(_MIPS_SIM) && ((_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32))
# ifndef DISABLE_DOUBLE

#if __mips_isa_rev > 5
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
#  undef PREFETCH_STORE_HINT
#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE_STREAMED
/* Some asm.h files do not have the L macro definition. */
# if _MIPS_SIM == _ABIO32
#  define L(label) $L ## label
#  define L(label) .L ## label

/* Some asm.h files do not have the PTR_ADDIU macro definition. */
#  define PTR_ADDIU daddiu
#  define PTR_ADDIU addiu

/* Some asm.h files do not have the PTR_SRA macro definition. */

/* New R6 instructions that may not be in asm.h. */
# if _MIPS_SIM == _ABI64
 * Using PREFETCH_HINT_LOAD_STREAMED instead of PREFETCH_LOAD on load
 * prefetches appears to offer a slight performance advantage.
 *
 * Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
 * or PREFETCH_STORE_STREAMED offers a large performance advantage
 * but PREPAREFORSTORE has some special restrictions to consider.
 *
 * Prefetch with the 'prepare for store' hint does not copy a memory
 * location into the cache; it just allocates a cache line and zeros
 * it out. This means that if you do not write to the entire cache
 * line before writing it out to memory, some data will get zeroed out
 * when the cache line is written back to memory and data will be lost.
 *
 * Also, if you are using this memcpy to copy overlapping buffers it may
 * not behave correctly when using the 'prepare for store' hint. If you
 * use the 'prepare for store' prefetch on a memory area that is in the
 * memcpy source (as well as the memcpy destination), then you will get
 * some data zeroed out before you have a chance to read it and data will
 * be lost.
 *
 * If you are going to use this memcpy routine with the 'prepare for store'
 * prefetch you may want to set USE_MEMMOVE_FOR_OVERLAP in order to avoid
 * the problem of running memcpy on overlapping buffers.
 *
 * There are ifdef'ed sections of this memcpy to make sure that it does not
 * do prefetches on cache lines that are not going to be completely written.
 * This code is only needed and only used when PREFETCH_STORE_HINT is set to
 * PREFETCH_HINT_PREPAREFORSTORE. This code assumes that cache lines are
 * 32 bytes; if the cache line is larger it will not work correctly.
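
/* As a rough illustration only (not part of the code below), the hazard can
 * be modeled in C, assuming a 32-byte cache line and a hypothetical helper
 * line_for() that returns the start of the cache line holding an address:
 *
 *   char *line = line_for(dst + offset);    // hypothetical helper
 *   memset(line, 0, 32);                    // what PREPAREFORSTORE behaves like
 *   memcpy(dst + offset, src + offset, k);  // if k < 32, or dst+offset is not at
 *                                           // the line start, the untouched bytes
 *                                           // of the line stay zero and clobber
 *                                           // memory when the line is written back
 *
 * This is why the code below only issues 'prepare for store' prefetches for
 * cache lines it will completely overwrite.  */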
# define PREFETCH_HINT_LOAD 0
# define PREFETCH_HINT_STORE 1
# define PREFETCH_HINT_LOAD_STREAMED 4
# define PREFETCH_HINT_STORE_STREAMED 5
# define PREFETCH_HINT_LOAD_RETAINED 6
# define PREFETCH_HINT_STORE_RETAINED 7
# define PREFETCH_HINT_WRITEBACK_INVAL 25
# define PREFETCH_HINT_PREPAREFORSTORE 30

/*
 * If we have not picked out what hints to use at this point, use the
 * standard load and store prefetch hints.
 */
# ifndef PREFETCH_STORE_HINT
#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE
# ifndef PREFETCH_LOAD_HINT
#  define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD
/*
 * We double everything when USE_DOUBLE is true so we do 2 prefetches to
 * get 64 bytes in that case. The assumption is that each individual
 * prefetch brings in 32 bytes.
 */
# define PREFETCH_CHUNK 64
# define PREFETCH_FOR_LOAD(chunk, reg) \
   pref PREFETCH_LOAD_HINT, (chunk)*64(reg); \
   pref PREFETCH_LOAD_HINT, ((chunk)*64)+32(reg)
# define PREFETCH_FOR_STORE(chunk, reg) \
   pref PREFETCH_STORE_HINT, (chunk)*64(reg); \
   pref PREFETCH_STORE_HINT, ((chunk)*64)+32(reg)
# define PREFETCH_CHUNK 32
# define PREFETCH_FOR_LOAD(chunk, reg) \
   pref PREFETCH_LOAD_HINT, (chunk)*32(reg)
# define PREFETCH_FOR_STORE(chunk, reg) \
   pref PREFETCH_STORE_HINT, (chunk)*32(reg)
/* MAX_PREFETCH_SIZE is the maximum size of a prefetch; it must not be less
 * than PREFETCH_CHUNK, the assumed size of each prefetch. If the real size
 * of a prefetch is greater than MAX_PREFETCH_SIZE and the PREPAREFORSTORE
 * hint is used, the code will not work correctly. If PREPAREFORSTORE is not
 * used then MAX_PREFETCH_SIZE does not matter. */
# define MAX_PREFETCH_SIZE 128
/* PREFETCH_LIMIT is set based on the fact that we never use an offset greater
 * than 5 on a STORE prefetch and that a single prefetch can never be larger
 * than MAX_PREFETCH_SIZE. We add the extra 32 when USE_DOUBLE is set because
 * we actually do two prefetches in that case, one 32 bytes after the other. */
# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + 32 + MAX_PREFETCH_SIZE
# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + MAX_PREFETCH_SIZE
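/* As a worked example (illustration only): with USE_DOUBLE, PREFETCH_CHUNK is
 * 64, so PREFETCH_LIMIT = 5*64 + 32 + 128 = 480 bytes; without USE_DOUBLE,
 * PREFETCH_CHUNK is 32 and PREFETCH_LIMIT = 5*32 + 128 = 288 bytes. A dst
 * address at least this far from the end of the buffer can safely take a
 * 'prepare for store' prefetch at the largest offset the loop uses. */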
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) \
     && ((PREFETCH_CHUNK * 4) < MAX_PREFETCH_SIZE)
/* We cannot handle this because the initial prefetches may fetch bytes that
 * are before the buffer being copied. We start copies with an offset
 * of 4, so we avoid this situation when using PREPAREFORSTORE. */
#error "PREFETCH_CHUNK is too large and/or MAX_PREFETCH_SIZE is too small."

#else /* USE_PREFETCH not defined */
# define PREFETCH_FOR_LOAD(offset, reg)
# define PREFETCH_FOR_STORE(offset, reg)

/* Allow the routine to be named something else if desired. */
# define MEMCPY_NAME memcpy

/* We use these 32/64 bit registers as temporaries to do the copying. */
#if defined(_MIPS_SIM) && ((_MIPS_SIM == _ABIO32) || (_MIPS_SIM == _ABIO64))
/* We load/store 64 bits at a time when USE_DOUBLE is true.
 * The C_ prefix stands for CHUNK and is used to avoid macro name
 * conflicts with system header files. */
# define C_LDHI ldl /* high part is left in big-endian */
# define C_STHI sdl /* high part is left in big-endian */
# define C_LDLO ldr /* low part is right in big-endian */
# define C_STLO sdr /* low part is right in big-endian */
# define C_LDHI ldr /* high part is right in little-endian */
# define C_STHI sdr /* high part is right in little-endian */
# define C_LDLO ldl /* low part is left in little-endian */
# define C_STLO sdl /* low part is left in little-endian */
# define C_ALIGN dalign /* r6 align instruction */
# define C_LDHI lwl /* high part is left in big-endian */
# define C_STHI swl /* high part is left in big-endian */
# define C_LDLO lwr /* low part is right in big-endian */
# define C_STLO swr /* low part is right in big-endian */
# define C_LDHI lwr /* high part is right in little-endian */
# define C_STHI swr /* high part is right in little-endian */
# define C_LDLO lwl /* low part is left in little-endian */
# define C_STLO swl /* low part is left in little-endian */
# define C_ALIGN align /* r6 align instruction */
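/* Illustration only (not used by the code): a C_LDHI/C_LDLO pair reads one
 * possibly-unaligned (d)word by touching the two aligned (d)words that
 * straddle it. Semantically, "C_LDHI v1, 0(a1); C_LDLO v1, NSIZE-1(a1)" acts
 * like this hypothetical C model:
 *
 *   unsigned long v1 = 0;
 *   memcpy(&v1, (const void *) a1, NSIZE);  // byte-wise read of NSIZE bytes
 *
 * The real instructions avoid a byte loop: C_LDHI fills the high-order end of
 * the register from the first aligned (d)word and C_LDLO fills the low-order
 * end from the next one, with the left/right choice depending on endianness
 * as shown above. */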
/* Bookkeeping values for 32 vs. 64 bit mode. */
# define NSIZEMASK 0x3f
# define NSIZEDMASK 0x7f
# define NSIZEMASK 0x1f
# define NSIZEDMASK 0x3f
#define UNIT(unit) ((unit)*NSIZE)
#define UNITM1(unit) (((unit)*NSIZE)-1)
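/* Illustration only, assuming NSIZE is 8 in 64-bit (USE_DOUBLE) mode and 4
 * otherwise: UNIT(3) is then byte offset 24 (or 12) and UNITM1(3) is 23 (or
 * 11). NSIZEDMASK masks off the remainder of a 128-byte (64-byte) main-loop
 * chunk and NSIZEMASK the remainder of a 64-byte (32-byte) chunk. */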
#ifdef ANDROID_CHANGES

/*
 * Below we handle the case where memcpy is called with overlapping src and dst.
 * Although memcpy is not required to handle this case, some parts of Android
 * like Skia rely on such usage. We call memmove to handle such cases.
 */
#ifdef USE_MEMMOVE_FOR_OVERLAP
	beq t2,zero,L(memcpy)
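/* Roughly, in C (illustration only, assuming t2 was set just above to the
 * result of the usual |dst - src| < n test):
 *
 *   if ((size_t) (dst > src ? dst - src : src - dst) >= n)
 *     goto memcpy_body;          // far enough apart; plain memcpy is safe
 *   return memmove(dst, src, n); // overlapping; let memmove handle it
 */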
/*
 * If the size is less than 2*NSIZE (8 or 16), go to L(lastb). Regardless of
 * size, copy the dst pointer to v0 for the return value.
 */
	slti t2,a2,(2 * NSIZE)
#if defined(RETURN_FIRST_PREFETCH) || defined(RETURN_LAST_PREFETCH)

/*
 * If src and dst have different alignments, go to L(unaligned); if they
 * have the same alignment (but are not actually aligned) do a partial
 * load/store to make them aligned. If they are both already aligned
 * we can start copying at L(aligned).
 */
	andi t8,t8,(NSIZE-1) /* t8 is a0/a1 word-displacement */
	bne t8,zero,L(unaligned)
	PTR_SUBU a3, zero, a0
	andi a3,a3,(NSIZE-1) /* copy a3 bytes to align a0/a1 */
	beq a3,zero,L(aligned) /* if a3=0, it is already aligned */
	PTR_SUBU a2,a2,a3 /* a2 is the remaining bytes count */
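/* A rough C sketch of this prologue (illustration only; the size-check branch
 * itself is not shown here, and t8 is assumed to hold src^dst as the comment
 * above implies):
 *
 *   if (n < 2 * NSIZE) goto lastb;             // tiny copies go byte by byte
 *   v0 = dst;                                  // memcpy returns dst
 *   if (((src ^ dst) & (NSIZE - 1)) != 0) goto unaligned;
 *   a3 = (-(uintptr_t) dst) & (NSIZE - 1);     // bytes needed to align dst
 *   if (a3 == 0) goto aligned;
 *   n -= a3;                                   // then copy those a3 bytes
 */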
/*
 * Align the destination and hope that the source gets aligned too. If it
 * doesn't we jump to L(r6_unaligned*) to do unaligned copies using the r6
 * align instruction.
 */

/*
 * Now dst/src are both aligned to (word or double word) aligned addresses.
 * Set a2 to count how many bytes we have to copy after all the 64/128 byte
 * chunks are copied and a3 to the dst pointer after all the 64/128 byte
 * chunks have been copied. We will loop, incrementing a0 and a1 until a0
 * equals a3.
 */
	andi t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
	beq a2,t8,L(chkw) /* if a2==t8, no 64-byte/128-byte chunks */
	PTR_SUBU a3,a2,t8 /* subtract from a2 the remainder */
	PTR_ADDU a3,a0,a3 /* Now a3 is the final dst after loop */

/* When in the loop we may prefetch with the 'prepare for store' hint,
 * in this case a0+x should not be past the "t0-32" address. This
 * means: for x=128 the last "safe" a0 address is "t0-160". Alternatively,
 * for x=64 the last "safe" a0 address is "t0-96". In the current version we
 * will use "prefetch hint,128(a0)", so "t0-160" is the limit.
 */
#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	PTR_ADDU t0,a0,a2 /* t0 is the "past the end" address */
	PTR_SUBU t9,t0,PREFETCH_LIMIT /* t9 is the "last safe pref" address */
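/* In C terms (illustration only), this sets up the main-loop bounds roughly as:
 *
 *   t8 = n & NSIZEDMASK;       // bytes left after whole 64/128-byte chunks
 *   if (n == t8) goto chkw;    // no whole chunk to copy
 *   a3 = dst + (n - t8);       // first dst address past the chunked region
 *   t0 = dst + n;              // one past the end of the whole copy
 *   t9 = t0 - PREFETCH_LIMIT;  // last dst for which a store prefetch is safe
 */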
	PREFETCH_FOR_LOAD (0, a1)
	PREFETCH_FOR_LOAD (1, a1)
	PREFETCH_FOR_LOAD (2, a1)
	PREFETCH_FOR_LOAD (3, a1)
#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
	PREFETCH_FOR_STORE (1, a0)
	PREFETCH_FOR_STORE (2, a0)
	PREFETCH_FOR_STORE (3, a0)
#if defined(RETURN_FIRST_PREFETCH) && defined(USE_PREFETCH)
# if PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*4)
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*1)
#if defined(RETURN_LAST_PREFETCH) && defined(USE_PREFETCH) \
    && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*3)
#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	sltu v1,t9,a0 /* If a0 > t9 don't use next prefetch */
	PREFETCH_FOR_STORE (2, a0)
	PREFETCH_FOR_STORE (4, a0)
	PREFETCH_FOR_STORE (5, a0)
#if defined(RETURN_LAST_PREFETCH) && defined(USE_PREFETCH)
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*5)
	C_LD REG2,UNIT(2)(a1)
	C_LD REG3,UNIT(3)(a1)
	C_LD REG4,UNIT(4)(a1)
	C_LD REG5,UNIT(5)(a1)
	C_LD REG6,UNIT(6)(a1)
	C_LD REG7,UNIT(7)(a1)
	PREFETCH_FOR_LOAD (3, a1)
	PREFETCH_FOR_LOAD (4, a1)
	C_ST REG2,UNIT(2)(a0)
	C_ST REG3,UNIT(3)(a0)
	C_ST REG4,UNIT(4)(a0)
	C_ST REG5,UNIT(5)(a0)
	C_ST REG6,UNIT(6)(a0)
	C_ST REG7,UNIT(7)(a0)
	C_LD REG2,UNIT(10)(a1)
	C_LD REG3,UNIT(11)(a1)
	C_LD REG4,UNIT(12)(a1)
	C_LD REG5,UNIT(13)(a1)
	C_LD REG6,UNIT(14)(a1)
	C_LD REG7,UNIT(15)(a1)
	PREFETCH_FOR_LOAD (5, a1)
	C_ST REG2,UNIT(10)(a0)
	C_ST REG3,UNIT(11)(a0)
	C_ST REG4,UNIT(12)(a0)
	C_ST REG5,UNIT(13)(a0)
	C_ST REG6,UNIT(14)(a0)
	C_ST REG7,UNIT(15)(a0)
	PTR_ADDIU a0,a0,UNIT(16) /* adding 64/128 to dest */
	PTR_ADDIU a1,a1,UNIT(16) /* adding 64/128 to src */
/* Here we have src and dest word-aligned but less than 64 bytes or
 * 128 bytes to go. Check for a 32(64) byte chunk and copy it if there
 * is one. Otherwise jump down to L(chk1w) to handle the tail end of
 * the copy. */
	PREFETCH_FOR_LOAD (0, a1)
	andi t8,a2,NSIZEMASK /* Is there a 32-byte/64-byte chunk? */
	/* t8 is the remainder count past 32 bytes. */
	beq a2,t8,L(chk1w) /* When a2==t8, no 32-byte chunk */
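/* Roughly (illustration only):
 *
 *   t8 = n & NSIZEMASK;       // remainder after one 32/64-byte chunk
 *   if (n == t8) goto chk1w;  // nothing that big left; copy (d)words
 *   // otherwise fall through and copy one 32-byte (64-byte) chunk
 */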
	C_LD REG2,UNIT(2)(a1)
	C_LD REG3,UNIT(3)(a1)
	C_LD REG4,UNIT(4)(a1)
	C_LD REG5,UNIT(5)(a1)
	C_LD REG6,UNIT(6)(a1)
	C_LD REG7,UNIT(7)(a1)
	PTR_ADDIU a1,a1,UNIT(8)
	C_ST REG2,UNIT(2)(a0)
	C_ST REG3,UNIT(3)(a0)
	C_ST REG4,UNIT(4)(a0)
	C_ST REG5,UNIT(5)(a0)
	C_ST REG6,UNIT(6)(a0)
	C_ST REG7,UNIT(7)(a0)
	PTR_ADDIU a0,a0,UNIT(8)
/*
 * Here we have less than 32(64) bytes to copy. Set up for a loop to
 * copy one word (or double word) at a time. Set a2 to count how many
 * bytes we have to copy after all the word (or double word) chunks are
 * copied and a3 to the dst pointer after all the (d)word chunks have
 * been copied. We will loop, incrementing a0 and a1 until a0 equals a3.
 */
	andi a2,t8,(NSIZE-1) /* a2 is the remainder past one (d)word chunks */
	PTR_SUBU a3,t8,a2 /* a3 is count of bytes in one (d)word chunks */
	PTR_ADDU a3,a0,a3 /* a3 is the dst address after loop */

/* copying in words (4-byte or 8-byte chunks) */
	C_LD REG3,UNIT(0)(a1)
	PTR_ADDIU a0,a0,UNIT(1)
	PTR_ADDIU a1,a1,UNIT(1)
	bne a0,a3,L(wordCopy_loop)
	C_ST REG3,UNIT(-1)(a0)
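/* As C (illustration only; word_t is a hypothetical NSIZE-wide type, and the
 * store sits in the branch delay slot, so it still runs on the final pass):
 *
 *   a2 = t8 & (NSIZE - 1);     // bytes left for the byte-by-byte tail
 *   a3 = dst + (t8 - a2);      // dst address after the (d)word loop
 *   while (dst != a3) {
 *     word_t reg = *(const word_t *) src;
 *     dst += NSIZE; src += NSIZE;
 *     ((word_t *) dst)[-1] = reg;
 *   }
 */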
/* Copy the last 8 (or 16) bytes */
	PTR_ADDU a3,a0,a2 /* a3 is the last dst address */
	bne a0,a3,L(lastbloop)
/*
 * UNALIGNED case, got here with a3 = "negu a0".
 * This code is nearly identical to the aligned code above
 * but only the destination (not the source) gets aligned,
 * so we need to do partial loads of the source followed
 * by normal stores to the destination (once we have aligned
 * the destination).
 */
	andi a3,a3,(NSIZE-1) /* copy a3 bytes to align a0/a1 */
	beqz a3,L(ua_chk16w) /* if a3=0, it is already aligned */
	PTR_SUBU a2,a2,a3 /* a2 is the remaining bytes count */

	C_LDHI v1,UNIT(0)(a1)
	C_LDLO v1,UNITM1(1)(a1)
	C_STHI v1,UNIT(0)(a0)
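/* Roughly, in C (illustration only; load_unaligned() and store_first_bytes()
 * are hypothetical helpers standing in for the instruction pairs):
 *
 *   a3 = (-(uintptr_t) dst) & (NSIZE - 1);  // computed before this point
 *   if (a3 == 0) goto ua_chk16w;
 *   n -= a3;
 *   word_t v = load_unaligned(src);         // C_LDHI + C_LDLO
 *   store_first_bytes(dst, v, a3);          // C_STHI writes only the a3 bytes
 *                                           // up to the next (d)word boundary
 *   dst += a3; src += a3;                   // done by code not shown here
 */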
/*
 * Now the destination (but not the source) is aligned.
 * Set a2 to count how many bytes we have to copy after all the 64/128 byte
 * chunks are copied and a3 to the dst pointer after all the 64/128 byte
 * chunks have been copied. We will loop, incrementing a0 and a1 until a0
 * equals a3.
 */
	andi t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
	beq a2,t8,L(ua_chkw) /* if a2==t8, no 64-byte/128-byte chunks */
	PTR_SUBU a3,a2,t8 /* subtract from a2 the remainder */
	PTR_ADDU a3,a0,a3 /* Now a3 is the final dst after loop */

# if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	PTR_ADDU t0,a0,a2 /* t0 is the "past the end" address */
	PTR_SUBU t9,t0,PREFETCH_LIMIT /* t9 is the "last safe pref" address */
	PREFETCH_FOR_LOAD (0, a1)
	PREFETCH_FOR_LOAD (1, a1)
	PREFETCH_FOR_LOAD (2, a1)
# if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
	PREFETCH_FOR_STORE (1, a0)
	PREFETCH_FOR_STORE (2, a0)
	PREFETCH_FOR_STORE (3, a0)
# if defined(RETURN_FIRST_PREFETCH) && defined(USE_PREFETCH)
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	bgtz v1,L(ua_skip_set)
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*4)
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*1)
	PREFETCH_FOR_LOAD (3, a1)
	C_LDHI t0,UNIT(0)(a1)
	C_LDHI t1,UNIT(1)(a1)
	C_LDHI REG2,UNIT(2)(a1)
# if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	bgtz v1,L(ua_skip_pref)
	C_LDHI REG3,UNIT(3)(a1)
	PREFETCH_FOR_STORE (4, a0)
	PREFETCH_FOR_STORE (5, a0)
	C_LDHI REG4,UNIT(4)(a1)
	C_LDHI REG5,UNIT(5)(a1)
	C_LDHI REG6,UNIT(6)(a1)
	C_LDHI REG7,UNIT(7)(a1)
	C_LDLO t0,UNITM1(1)(a1)
	C_LDLO t1,UNITM1(2)(a1)
	C_LDLO REG2,UNITM1(3)(a1)
	C_LDLO REG3,UNITM1(4)(a1)
	C_LDLO REG4,UNITM1(5)(a1)
	C_LDLO REG5,UNITM1(6)(a1)
	C_LDLO REG6,UNITM1(7)(a1)
	C_LDLO REG7,UNITM1(8)(a1)
	PREFETCH_FOR_LOAD (4, a1)
	C_ST REG2,UNIT(2)(a0)
	C_ST REG3,UNIT(3)(a0)
	C_ST REG4,UNIT(4)(a0)
	C_ST REG5,UNIT(5)(a0)
	C_ST REG6,UNIT(6)(a0)
	C_ST REG7,UNIT(7)(a0)
	C_LDHI t0,UNIT(8)(a1)
	C_LDHI t1,UNIT(9)(a1)
	C_LDHI REG2,UNIT(10)(a1)
	C_LDHI REG3,UNIT(11)(a1)
	C_LDHI REG4,UNIT(12)(a1)
	C_LDHI REG5,UNIT(13)(a1)
	C_LDHI REG6,UNIT(14)(a1)
	C_LDHI REG7,UNIT(15)(a1)
	C_LDLO t0,UNITM1(9)(a1)
	C_LDLO t1,UNITM1(10)(a1)
	C_LDLO REG2,UNITM1(11)(a1)
	C_LDLO REG3,UNITM1(12)(a1)
	C_LDLO REG4,UNITM1(13)(a1)
	C_LDLO REG5,UNITM1(14)(a1)
	C_LDLO REG6,UNITM1(15)(a1)
	C_LDLO REG7,UNITM1(16)(a1)
	PREFETCH_FOR_LOAD (5, a1)
	C_ST REG2,UNIT(10)(a0)
	C_ST REG3,UNIT(11)(a0)
	C_ST REG4,UNIT(12)(a0)
	C_ST REG5,UNIT(13)(a0)
	C_ST REG6,UNIT(14)(a0)
	C_ST REG7,UNIT(15)(a0)
	PTR_ADDIU a0,a0,UNIT(16) /* adding 64/128 to dest */
	bne a0,a3,L(ua_loop16w)
	PTR_ADDIU a1,a1,UNIT(16) /* adding 64/128 to src */
/* Here the dst is word-aligned (the src may not be) and there are less than
 * 64 or 128 bytes to go. Check for a 32(64) byte chunk and copy it if there
 * is one. Otherwise jump down to L(ua_chk1w) to handle the tail end of
 * the copy. */
	PREFETCH_FOR_LOAD (0, a1)
	andi t8,a2,NSIZEMASK /* Is there a 32-byte/64-byte chunk? */
	/* t8 is the remainder count past 32 bytes. */
	beq a2,t8,L(ua_chk1w) /* When a2==t8, no 32-byte chunk */
	C_LDHI t0,UNIT(0)(a1)
	C_LDHI t1,UNIT(1)(a1)
	C_LDHI REG2,UNIT(2)(a1)
	C_LDHI REG3,UNIT(3)(a1)
	C_LDHI REG4,UNIT(4)(a1)
	C_LDHI REG5,UNIT(5)(a1)
	C_LDHI REG6,UNIT(6)(a1)
	C_LDHI REG7,UNIT(7)(a1)
	C_LDLO t0,UNITM1(1)(a1)
	C_LDLO t1,UNITM1(2)(a1)
	C_LDLO REG2,UNITM1(3)(a1)
	C_LDLO REG3,UNITM1(4)(a1)
	C_LDLO REG4,UNITM1(5)(a1)
	C_LDLO REG5,UNITM1(6)(a1)
	C_LDLO REG6,UNITM1(7)(a1)
	C_LDLO REG7,UNITM1(8)(a1)
	PTR_ADDIU a1,a1,UNIT(8)
	C_ST REG2,UNIT(2)(a0)
	C_ST REG3,UNIT(3)(a0)
	C_ST REG4,UNIT(4)(a0)
	C_ST REG5,UNIT(5)(a0)
	C_ST REG6,UNIT(6)(a0)
	C_ST REG7,UNIT(7)(a0)
	PTR_ADDIU a0,a0,UNIT(8)
/*
 * Here we have less than 32(64) bytes to copy. Set up for a loop to
 * copy one word (or double word) at a time.
 */
	andi a2,t8,(NSIZE-1) /* a2 is the remainder past one (d)word chunks */
	beq a2,t8,L(ua_smallCopy)
	PTR_SUBU a3,t8,a2 /* a3 is count of bytes in one (d)word chunks */
	PTR_ADDU a3,a0,a3 /* a3 is the dst address after loop */
/* copying in words (4-byte or 8-byte chunks) */
	C_LDHI v1,UNIT(0)(a1)
	C_LDLO v1,UNITM1(1)(a1)
	PTR_ADDIU a0,a0,UNIT(1)
	PTR_ADDIU a1,a1,UNIT(1)
	bne a0,a3,L(ua_wordCopy_loop)

/* Copy the last 8 (or 16) bytes */
	PTR_ADDU a3,a0,a2 /* a3 is the last dst address */
L(ua_smallCopy_loop):
	bne a0,a3,L(ua_smallCopy_loop)
# define SWAP_REGS(X,Y) X, Y
# define ALIGN_OFFSET(N) (N)
# define SWAP_REGS(X,Y) Y, X
# define ALIGN_OFFSET(N) (NSIZE-N)
# define R6_UNALIGNED_WORD_COPY(BYTEOFFSET) \
	andi REG7, a2, (NSIZE-1); /* REG7 is # of bytes to copy byte by byte. */ \
	beq REG7, a2, L(lastb); /* Check for bytes to copy by word */ \
	PTR_SUBU a3, a2, REG7; /* a3 is number of bytes to be copied in */ \
	                       /* (d)word chunks. */ \
	move a2, REG7; /* a2 is # of bytes to copy byte by byte */ \
	               /* after word loop is finished. */ \
	PTR_ADDU REG6, a0, a3; /* REG6 is the dst address after loop. */ \
	PTR_SUBU REG2, a1, t8; /* REG2 is the aligned src address. */ \
	PTR_ADDU a1, a1, a3; /* a1 is addr of source after word loop. */ \
	C_LD t0, UNIT(0)(REG2); /* Load first part of source. */ \
L(r6_ua_wordcopy##BYTEOFFSET): \
	C_LD t1, UNIT(1)(REG2); /* Load second part of source. */ \
	C_ALIGN REG3, SWAP_REGS(t1,t0), ALIGN_OFFSET(BYTEOFFSET); \
	PTR_ADDIU a0, a0, UNIT(1); /* Increment destination pointer. */ \
	PTR_ADDIU REG2, REG2, UNIT(1); /* Increment aligned source pointer. */ \
	move t0, t1; /* Move second part of source to first. */ \
	bne a0, REG6,L(r6_ua_wordcopy##BYTEOFFSET); \
	C_ST REG3, UNIT(-1)(a0); \
/* We are generating R6 code; the destination is 4 byte aligned and
   the source is not 4 byte aligned. t8 is 1, 2, or 3 depending on the
   alignment of the source. */
	R6_UNALIGNED_WORD_COPY(1)
	R6_UNALIGNED_WORD_COPY(2)
	R6_UNALIGNED_WORD_COPY(3)
	R6_UNALIGNED_WORD_COPY(4)
	R6_UNALIGNED_WORD_COPY(5)
	R6_UNALIGNED_WORD_COPY(6)
	R6_UNALIGNED_WORD_COPY(7)
#ifndef ANDROID_CHANGES
libc_hidden_builtin_def (MEMCPY_NAME)