/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Optimized memcpy() for ARM.
 *
 * note that memcpy() always returns the destination pointer,
 * so we have to preserve R0.
 */

/*
 * This file has been modified from the original for use in musl libc.
 * The main changes are: addition of .type memcpy,%function to make the
 * code safely callable from thumb mode, adjusting the return
 * instructions to be compatible with pre-thumb ARM cpus, and removal
 * of prefetch code that is not compatible with older cpus.
 */

.syntax unified

.global memcpy
.type memcpy,%function
memcpy:
        /* The stack must always be 64-bit aligned to be compliant with the
         * ARM ABI. Since we have to save R0, we might as well save R4,
         * which we can use for better pipelining of the reads below.
         */
        stmfd   sp!, {r0, r4, lr}
        /* Make room for r5-r11, which will be spilled later. */
        sub     sp, sp, #28

        /* it simplifies things to take care of len<4 early */
        cmp     r2, #4
        blo     copy_last_3_and_return

        /* compute the offset to align the source
         * offset = (4-(src&3))&3 = -src & 3
         */
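        /* A quick check of that identity: src&3 = 1 gives (4-1)&3 = 3 and
         * (-1)&3 = 3; src&3 = 0 gives 4&3 = 0 and 0&3 = 0. Either form
         * yields the 0-3 bytes needed to reach the next word boundary.
         */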
        rsb     r3, r1, #0
        ands    r3, r3, #3
        beq     src_aligned

        /* align source to 32 bits. We need to insert 2 instructions between
         * a ldr[b|h] and str[b|h] because byte and half-word instructions
         * stall 2 cycles.
         */
        movs    r12, r3, lsl #31
        sub     r2, r2, r3              /* we know that r3 <= r2 because r2 >= 4 */
        ldrbmi  r3, [r1], #1
        ldrbcs  r4, [r1], #1
        ldrbcs  r12, [r1], #1
        strbmi  r3, [r0], #1
        strbcs  r4, [r0], #1
        strbcs  r12, [r0], #1

src_aligned:
        /* see if src and dst are aligned together (congruent) */
        eor     r12, r0, r1
        tst     r12, #3
        bne     non_congruent

        /* Use post-increment mode for stm to spill r5-r11 to the reserved
         * stack frame. Don't update sp.
         */
        stmea   sp, {r5-r11}
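        /* stmea ("empty ascending") stores r5-r11 upward from [sp] without
         * writeback, filling the 28-byte frame reserved in the prologue while
         * leaving sp in place for the matching ldmfd refill later.
         */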

        /* align the destination to a cache-line */
        rsb     r3, r0, #0
        ands    r3, r3, #0x1C
        beq     congruent_aligned32
        cmp     r3, r2
        andhi   r3, r2, #0x1C

        /* conditionally copies 0 to 7 words (length in r3) */
        movs    r12, r3, lsl #28
        ldmcs   r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmi   r1!, {r8, r9}           /*  8 bytes */
        stmcs   r0!, {r4, r5, r6, r7}
        stmmi   r0!, {r8, r9}
        tst     r3, #0x4
        ldrne   r10, [r1], #4           /*  4 bytes */
        strne   r10, [r0], #4
        sub     r2, r2, r3
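        /* The flag trick: "movs r12, r3, lsl #28" shifts bit 4 of r3 into
         * the carry and bit 3 into the sign bit, so C means a 16-byte chunk
         * remains and N means an 8-byte chunk remains. E.g. r3 = 28 (0b11100)
         * sets both, and the tst of bit 2 picks up the final 4 bytes.
         */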

congruent_aligned32:
        /*
         * here source is aligned to 32 bytes.
         */
        subs    r2, r2, #32
        blo     less_than_32_left

        /*
         * We preload a cache line up to 64 bytes ahead. On the 926, this will
         * stall only until the requested word is fetched, but the linefill
         * continues in the background.
         * While the linefill is going, we write our previous cache line
         * into the write-buffer (which should have some free space).
         * When the linefill is done, the write-buffer will
         * start dumping its content into memory.
         *
         * While all this is going on, we then load a full cache line into
         * 8 registers; this cache line should be in the cache by now
         * (or partly in the cache).
         *
         * This code should work well regardless of the source/dest alignment.
         */

        /* Align the preload register to a cache-line because the cpu does
         * "critical word first" (the first word requested is loaded first).
         */
        bic     r12, r1, #0x1F
        add     r12, r12, #64

1:      ldmia   r1!, {r4-r11}
        /*
         * NOTE: if r12 is more than 64 bytes ahead of r1, the following ldrhi
         * for the ARM9 preload will not be safely guarded by the preceding
         * subs. When it is safely guarded, the only way to get a SIGSEGV here
         * is for the caller to overstate the length.
         */
        @ ldrhi r3, [r12], #32          /* cheap ARM9 preload */
        stmia   r0!, {r4-r11}

        subs    r2, r2, #32
        bhs     1b

        add     r2, r2, #32
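        /* The hot loop above, as a rough C sketch (s, d, n stand in for
         * r1, r0, r2; w[] for r4-r11):
         *
         *     while ((n -= 32) >= 0) {                      // subs / bhs
         *         uint32_t w[8];
         *         for (int i = 0; i < 8; i++) w[i] = *s++;  // ldmia r1!
         *         for (int i = 0; i < 8; i++) *d++ = w[i];  // stmia r0!
         *     }
         *     n += 32;                  // undo the overshoot of the last subs
         */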

less_than_32_left:
        /*
         * less than 32 bytes left at this point (length in r2)
         */
        /* skip all this if there is nothing to do, which should
         * be a common case (if not executed, the code below takes
         * about 16 cycles)
         */
        tst     r2, #0x1F
        beq     1f

        /* conditionally copies 0 to 31 bytes */
        movs    r12, r2, lsl #28
        ldmcs   r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmi   r1!, {r8, r9}           /*  8 bytes */
        stmcs   r0!, {r4, r5, r6, r7}
        stmmi   r0!, {r8, r9}
        movs    r12, r2, lsl #30
        ldrcs   r3, [r1], #4            /*  4 bytes */
        ldrhmi  r4, [r1], #2            /*  2 bytes */
        strcs   r3, [r0], #4
        strhmi  r4, [r0], #2
        tst     r2, #0x1
        ldrbne  r3, [r1]                /*  last byte */
        strbne  r3, [r0]
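        /* Same trick at a finer grain: lsl #30 moves bit 2 of r2 into the
         * carry (a 4-byte chunk) and bit 1 into the sign bit (a 2-byte
         * chunk); the final tst of bit 0 handles the odd byte.
         */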

        /* we're done! restore everything and return */
1:      ldmfd   sp!, {r5-r11}
        ldmfd   sp!, {r0, r4, lr}
        bx      lr

/********************************************************************/

non_congruent:
        /*
         * here source is aligned to 4 bytes
         * but destination is not.
         *
         * in the code below r2 is the number of bytes read
         * (the number of bytes written is always smaller, because we have
         * partial words in the shift queue)
         */
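        /* Rough little-endian C sketch of the shift-queue technique used
         * below (names are illustrative; "right" and "left" are the shift
         * amounts computed into r12 and lr):
         *
         *     uint32_t queue = ...;   // bytes not yet written, right-aligned
         *     while (at least a word left to read) {
         *         uint32_t cur = *src_word++;
         *         *dst_word++ = queue | (cur << left);
         *         queue = cur >> right;
         *     }
         */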
        cmp     r2, #4
        blo     copy_last_3_and_return

        /* Use post-increment mode for stm to spill r5-r11 to the reserved
         * stack frame. Don't update sp.
         */
        stmea   sp, {r5-r11}

        /* compute shifts needed to align src to dest */
        rsb     r5, r0, #0
        and     r5, r5, #3              /* r5 = # bytes in partial words */
        mov     r12, r5, lsl #3         /* r12 = right */
        rsb     lr, r12, #32            /* lr = left */
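        /* Example: if dst is 2 bytes past a word boundary, r5 = 2, so
         * right = 16 and left = 16, which selects the lsl #16 / lsr #16
         * stitching loop (loop16) below.
         */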

        /* read the first word */
        ldr     r3, [r1], #4
        sub     r2, r2, #4

        /* write a partial word (0 to 3 bytes), such that the destination
         * becomes aligned to 32 bits (r5 = nb of bytes to copy for alignment)
         */
        movs    r5, r5, lsl #31
        strbmi  r3, [r0], #1
        movmi   r3, r3, lsr #8
        strbcs  r3, [r0], #1
        movcs   r3, r3, lsr #8
        strbcs  r3, [r0], #1
        movcs   r3, r3, lsr #8

        cmp     r2, #4
        blo     partial_word_tail

        /* Align destination to 32 bytes (cache line boundary) */
1:      tst     r0, #0x1C
        beq     2f
        ldr     r5, [r1], #4
        sub     r2, r2, #4
        orr     r4, r3, r5, lsl lr
        mov     r3, r5, lsr r12
        str     r4, [r0], #4
        cmp     r2, #4
        bhs     1b
        blo     partial_word_tail

        /* copy 32 bytes at a time */
2:      subs    r2, r2, #32
        blo     less_than_thirtytwo

        /* Use immediate mode for the shifts, because there is an extra cycle
         * for register shifts, which could account for up to 50% of the
         * copy time.
         */
        cmp     r12, #24
        beq     loop24
        cmp     r12, #8
        beq     loop8

loop16:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, {r5, r6, r7, r8, r9, r10, r11}
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #16
        mov     r4, r4, lsr #16
        orr     r4, r4, r5, lsl #16
        mov     r5, r5, lsr #16
        orr     r5, r5, r6, lsl #16
        mov     r6, r6, lsr #16
        orr     r6, r6, r7, lsl #16
        mov     r7, r7, lsr #16
        orr     r7, r7, r8, lsl #16
        mov     r8, r8, lsr #16
        orr     r8, r8, r9, lsl #16
        mov     r9, r9, lsr #16
        orr     r9, r9, r10, lsl #16
        mov     r10, r10, lsr #16
        orr     r10, r10, r11, lsl #16
        stmia   r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
        mov     r3, r11, lsr #16
        bhs     1b
        b       less_than_thirtytwo
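        /* Note the software pipelining above: r12 always holds the next
         * source word, and the ldrhs that refills it is guarded by the
         * preceding subs, so the final iteration never reads past the
         * requested length.
         */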

loop8:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, {r5, r6, r7, r8, r9, r10, r11}
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #24
        mov     r4, r4, lsr #8
        orr     r4, r4, r5, lsl #24
        mov     r5, r5, lsr #8
        orr     r5, r5, r6, lsl #24
        mov     r6, r6, lsr #8
        orr     r6, r6, r7, lsl #24
        mov     r7, r7, lsr #8
        orr     r7, r7, r8, lsl #24
        mov     r8, r8, lsr #8
        orr     r8, r8, r9, lsl #24
        mov     r9, r9, lsr #8
        orr     r9, r9, r10, lsl #24
        mov     r10, r10, lsr #8
        orr     r10, r10, r11, lsl #24
        stmia   r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
        mov     r3, r11, lsr #8
        bhs     1b
        b       less_than_thirtytwo

loop24:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, {r5, r6, r7, r8, r9, r10, r11}
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #8
        mov     r4, r4, lsr #24
        orr     r4, r4, r5, lsl #8
        mov     r5, r5, lsr #24
        orr     r5, r5, r6, lsl #8
        mov     r6, r6, lsr #24
        orr     r6, r6, r7, lsl #8
        mov     r7, r7, lsr #24
        orr     r7, r7, r8, lsl #8
        mov     r8, r8, lsr #24
        orr     r8, r8, r9, lsl #8
        mov     r9, r9, lsr #24
        orr     r9, r9, r10, lsl #8
        mov     r10, r10, lsr #24
        orr     r10, r10, r11, lsl #8
        stmia   r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
        mov     r3, r11, lsr #24
        bhs     1b

less_than_thirtytwo:
        /* copy the last 0 to 31 bytes of the source */
        rsb     r12, lr, #32            /* we corrupted r12, recompute it */
        add     r2, r2, #32
        cmp     r2, #4
        blo     partial_word_tail

1:      ldr     r5, [r1], #4
        sub     r2, r2, #4
        orr     r4, r3, r5, lsl lr
        mov     r3, r5, lsr r12
        str     r4, [r0], #4
        cmp     r2, #4
        bhs     1b

partial_word_tail:
        /* we have a partial word in the input buffer */
        movs    r5, lr, lsl #(31-3)
        strbmi  r3, [r0], #1
        movmi   r3, r3, lsr #8
        strbcs  r3, [r0], #1
        movcs   r3, r3, lsr #8
        strbcs  r3, [r0], #1
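        /* lr/8 bytes remain queued in r3 here. lsl #(31-3) = lsl #28 moves
         * bit 3 of lr into the sign bit and bit 4 into the carry; e.g.
         * lr = 24 (three queued bytes) sets both, so the strbmi plus the two
         * strbcs stores drain the whole queue.
         */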

        /* Refill spilled registers from the stack. Don't update sp. */
        ldmfd   sp, {r5-r11}

copy_last_3_and_return:
        movs    r2, r2, lsl #31         /* copy remaining 0, 1, 2 or 3 bytes */
        ldrbmi  r2, [r1], #1
        ldrbcs  r3, [r1], #1
        ldrbcs  r12, [r1]
        strbmi  r2, [r0], #1
        strbcs  r3, [r0], #1
        strbcs  r12, [r0]
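        /* After "movs r2, r2, lsl #31", the sign bit holds bit 0 of the
         * count and the carry holds bit 1, so the mi pair moves one byte and
         * the cs pairs move two; together they cover any remainder of 0-3
         * bytes.
         */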

        /* we're done! restore sp and spilled registers and return */
        add     sp, sp, #28
        ldmfd   sp!, {r0, r4, lr}
        bx      lr