/* Optimized memcpy implementation for PowerPC64.
   Copyright (C) 2003, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);

   Memcpy handles short copies (< 32 bytes) using binary move blocks
   (no loops) of lwz/stw.  The tail (remaining 1-3 bytes) is handled
   with the appropriate combination of byte and halfword load/stores.
   There is minimal effort to optimize the alignment of short moves.
   The 64-bit implementations of POWER3 and POWER4 do a reasonable job
   of handling unaligned load/stores that do not cross 32-byte boundaries.

   Longer moves (>= 32 bytes) justify the effort to get at least the
   destination doubleword (8-byte) aligned.  Further optimization is
   possible when both source and destination are doubleword aligned.
   Each case has an optimized unrolled loop.  */
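/* The overall dispatch described above can be pictured in C.  This is only
   an illustrative sketch of the strategy: the name sketch_memcpy and the
   use of fixed-size memcpy calls for the individual chunks are assumptions
   made for readability, not the actual glibc code, which does everything
   with explicit lwz/stw and ld/std sequences.  */

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void *
sketch_memcpy (void *dst, const void *src, size_t len)
{
  unsigned char *d = dst;
  const unsigned char *s = src;

  if (len < 32)
    {
      /* Binary move blocks: peel power-of-two chunks, no loop.  */
      if (len & 16) { memcpy (d, s, 16); d += 16; s += 16; }
      if (len & 8)  { memcpy (d, s, 8);  d += 8;  s += 8;  }
      if (len & 4)  { memcpy (d, s, 4);  d += 4;  s += 4;  }
      if (len & 2)  { memcpy (d, s, 2);  d += 2;  s += 2;  }
      if (len & 1)  *d = *s;
      return dst;
    }

  /* Long move: first copy 0-7 bytes so dst becomes doubleword aligned,
     then copy full doublewords, then the 0-7 byte tail.  */
  size_t pre = (-(uintptr_t) d) & 7;
  memcpy (d, s, pre);
  d += pre; s += pre; len -= pre;
  while (len >= 8)
    { memcpy (d, s, 8); d += 8; s += 8; len -= 8; }
  memcpy (d, s, len);
  return dst;
}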
EALIGN (BP_SYM (memcpy), 5, 0)
    andi.   11,3,7      /* check alignment of dst.  */
    clrldi  0,0,61      /* Number of bytes until the 1st doubleword of dst.  */
    clrldi  10,4,61     /* check alignment of src.  */
    ble-    cr1,.L2     /* If move < 32 bytes use short move code.  */
    srdi    9,5,3       /* Number of full double words remaining.  */

  /* Move 0-7 bytes as needed to get the destination doubleword aligned.  */

    clrldi  10,12,61    /* check alignment of src again.  */
    srdi    9,31,3      /* Number of full double words remaining.  */
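/* The setup above effectively computes three quantities.  A hedged C sketch
   of that arithmetic (plan_copy and its field names are invented here for
   illustration; the assembly keeps these values in registers):  */

#include <stddef.h>
#include <stdint.h>

struct copy_plan { size_t pre, dwords, tail; };

/* Assumes len >= 32, as on this long-move path.  */
static struct copy_plan
plan_copy (const void *dst, size_t len)
{
  struct copy_plan p;
  p.pre = (8 - ((uintptr_t) dst & 7)) & 7;  /* 0-7 bytes to align dst.  */
  p.dwords = (len - p.pre) >> 3;            /* full doublewords (srdi ...,3).  */
  p.tail = (len - p.pre) & 7;               /* 0-7 trailing bytes.  */
  return p;
}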
  /* Copy doublewords from source to destination, assuming the
     destination is aligned on a doubleword boundary.

     At this point we know there are at least 25 bytes left (32-7) to copy.
     The next step is to determine if the source is also doubleword aligned.
     If not, branch to the unaligned move code at .L6, which uses
     a load, shift, store strategy.

     Otherwise source and destination are doubleword aligned, and we can
     use the optimized doubleword copy loop.  */

    bne-    cr6,.L6     /* If source is not DW aligned.  */
  /* Move doublewords where destination and source are DW aligned.
     Use an unrolled loop to copy 4 doublewords (32 bytes) per iteration.
     If the copy is not an exact multiple of 32 bytes, 1-3
     doublewords are copied as needed to set up the main loop.  After
     the main loop exits there may be a tail of 1-7 bytes.  These bytes
     are copied a word/halfword/byte at a time as needed to preserve
     alignment.

     For POWER6 the L1 is store-through and the L2 is store-in.  The
     L2 is clocked at half CPU clock so we can store 16 bytes every
     other cycle.  POWER6 also has a load/store bypass so we can do
     load, load, store, store every 2 cycles.

     For POWER6 unaligned loads will take a 20+ cycle hiccup for any
     L1 cache miss that crosses a 32- or 128-byte boundary.  Stores
     are more forgiving and do not take a hiccup until page or
     segment boundaries.  So we require doubleword alignment for
     the source but may take a risk and only require word alignment
     for the destination.  */
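/* A hedged C sketch of the aligned path described above (illustrative only;
   copy_dw_aligned and its parameter names are invented, and real code would
   schedule loads and stores in pairs as the comment says).  It peels 1-3
   leading doublewords so the main loop always moves 4 doublewords (32 bytes)
   per iteration.  */

#include <stddef.h>
#include <stdint.h>

static void
copy_dw_aligned (uint64_t *d, const uint64_t *s, size_t dwords)
{
  /* Peel 1-3 doublewords so the remaining count is a multiple of 4.  */
  while (dwords & 3)
    { *d++ = *s++; --dwords; }

  /* Main unrolled loop: 4 doublewords (32 bytes) per iteration.  */
  while (dwords)
    {
      uint64_t a = s[0], b = s[1], c = s[2], e = s[3];
      d[0] = a; d[1] = b; d[2] = c; d[3] = e;
      d += 4; s += 4; dwords -= 4;
    }
}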
  /* At this point we have a tail of 0-7 bytes and we know that the
     destination is doubleword aligned.  */
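/* The 0-7 byte tail is finished with at most one word, one halfword and one
   byte move, selected by the low bits of the remaining length.  A hedged C
   sketch of that idea (copy_tail is an invented name; the real code uses
   lwz/stw, lhz/sth and lbz/stb against the already-aligned destination):  */

#include <stddef.h>

static void
copy_tail (unsigned char *d, const unsigned char *s, size_t tail)
{
  if (tail & 4) { d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3]; d += 4; s += 4; }
  if (tail & 2) { d[0] = s[0]; d[1] = s[1]; d += 2; s += 2; }
  if (tail & 1) { d[0] = s[0]; }
}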
  /* Return original dst pointer.  */
  /* Copy up to 31 bytes.  This is divided into two cases: 0-8 bytes and
     9-31 bytes.  Each case is handled without loops, using binary
     (1,2,4,8)

     In the short (0-8 byte) case no attempt is made to force alignment
     of either source or destination.  The hardware will handle the
     unaligned load/stores with small delays for crossing 32-, 128-,
     and 4096-byte boundaries.  Since these short moves are unlikely to be
     unaligned or cross these boundaries, the overhead to force
     alignment is not justified.

     The longer (9-31 byte) move is more likely to cross 32- or 128-byte
     boundaries.  Since only loads are sensitive to the 32-/128-byte
     boundaries it is more important to align the source than the
     destination.  If the source is not already word aligned, we first
     move 1-3 bytes as needed.  Since we are then only word aligned, we
     don't use doubleword load/stores; this ensures that all loads are
     aligned.  While the destination and stores may still be unaligned,
     this is only an issue for page (4096-byte boundary) crossing, which
     should be rare for these short moves.  The hardware handles this
     case automatically with a small (~20 cycle) delay.  */
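/* A hedged C sketch of the 9-31 byte case just described (align_src_copy is
   an invented name; the byte and word moves mirror the comment, not the
   exact register usage).  The source is word aligned first, then the rest
   moves as aligned word loads paired with possibly unaligned word stores.  */

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void
align_src_copy (unsigned char *d, const unsigned char *s, size_t len)
{
  /* Copy 1-3 bytes so the source address becomes word (4-byte) aligned.  */
  size_t pre = (-(uintptr_t) s) & 3;
  for (size_t i = 0; i < pre; i++)
    d[i] = s[i];
  d += pre; s += pre; len -= pre;

  /* Aligned word loads; the stores may be unaligned, which is cheap here.  */
  while (len >= 4)
    {
      uint32_t w;
      memcpy (&w, s, 4);        /* aligned load */
      memcpy (d, &w, 4);        /* possibly unaligned store */
      d += 4; s += 4; len -= 4;
    }
  while (len--)
    *d++ = *s++;
}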
    ble     cr6,.LE8        /* Handle moves of 0-8 bytes.  */

  /* At least 9 bytes left.  Get the source word aligned.  */
    beq     L(dus_tail)     /* If the source is already word aligned, skip this.  */
  /* Copy 1-3 bytes to get source address word aligned.  */

  /* At least 6 bytes left and the source is word aligned.  This allows
     some speculative loads up front.  */
  /* We need to special case the fall-through because the biggest delays
     are due to address computation not being ready in time for the
L(dus_tail16):  /* Move 16 bytes.  */
  /* Move 8 bytes more.  */
    bf      28,L(dus_tail16p8)
  /* Move 4 bytes more.  */
    bf      29,L(dus_tail16p4)
  /* Exactly 28 bytes.  Return original dst pointer and exit.  */
L(dus_tail16p8):  /* Less than 8 bytes left.  */
    beq     cr1,L(dus_tailX)  /* Exactly 16 bytes, early exit.  */
    bf      29,L(dus_tail16p2)
  /* Move 4 bytes more.  */
  /* Exactly 20 bytes.  Return original dst pointer and exit.  */
L(dus_tail16p4):  /* Less than 4 bytes left.  */
  /* Exactly 24 bytes.  Return original dst pointer and exit.  */
L(dus_tail16p2):  /* 16 bytes moved, less than 4 bytes left.  */
L(dus_tail8):  /* Move 8 bytes.  */
  /* r6, r7 already loaded speculatively.  */
  /* Move 4 bytes more.  */
  /* Exactly 12 bytes.  Return original dst pointer and exit.  */
L(dus_tail8p4):  /* Less than 4 bytes left.  */
  /* Exactly 8 bytes.  Return original dst pointer and exit.  */
L(dus_tail4):  /* Move 4 bytes.  */
  /* r6 already loaded speculatively.  If we are here we know there are
     more than 4 bytes left, so there is no need to test.  */
L(dus_tail2):  /* Move 2-3 bytes.  */
L(dus_tail1):  /* Move 1 byte.  */
  /* Return original dst pointer.  */
  /* Special case to copy 0-8 bytes.  */

  /* Exactly 8 bytes.  We may cross a 32-/128-byte boundary and take a ~20
     cycle delay.  This case should be rare and any attempt to avoid this
     would take most of 20 cycles anyway.  */

  /* Return original dst pointer.  */

  /* Return original dst pointer.  */
  /* Copy doublewords where the destination is aligned but the source is
     not.  Use aligned doubleword loads from the source, shifted to realign
     the data, to allow aligned destination stores.  */
    addi    11,9,-1     /* loop DW count is one less than total */
    subf    5,10,12     /* Move source addr to previous full double word.  */
    srdi    8,11,2      /* calculate the 32-byte loop count */
    ld      6,0(5)      /* pre load 1st full doubleword.  */
    ld      7,8(5)      /* pre load 2nd full doubleword.  */
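/* A hedged C sketch of the load/shift/store idea behind the L(du1_*) through
   L(du7_*) blocks below (copy_dw_shifted and its parameters are invented; the
   real code has a separately scheduled copy of the loop for each of the seven
   possible shift amounts, handles the final doubleword specially, and the
   shift direction shown here assumes the big-endian byte order these POWER
   machines run in).  */

#include <stddef.h>
#include <stdint.h>

static void
copy_dw_shifted (uint64_t *d, const uint64_t *s_aligned,
                 unsigned off, size_t dwords)
{
  /* s_aligned points at the doubleword containing the first source byte;
     off (1-7) is how far into that doubleword the data starts.  */
  unsigned shl = 8 * off;        /* bits to shift the earlier doubleword */
  unsigned shr = 64 - shl;       /* bits to shift the later doubleword   */
  uint64_t prev = s_aligned[0];  /* preloaded, like the two ld above     */

  for (size_t i = 0; i < dwords; i++)
    {
      /* The final iteration reads the aligned doubleword holding the last
         source byte, never beyond it, mirroring the assembly's aligned
         loads.  */
      uint64_t next = s_aligned[i + 1];
      /* Big-endian merge: high bytes come from prev, low bytes from next.  */
      d[i] = (prev << shl) | (next >> shr);
      prev = next;
    }
}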
  /* there are at least two DWs to copy */
    blt     cr6,L(du1_fini)  /* if total DWs = 3, then bypass loop */
  /* there is a third DW to copy */
    beq     cr6,L(du1_fini)  /* if total DWs = 4, then bypass loop */
  /* copy 32 bytes at a time */
  /* calculate and store the final DW */
  /* there are at least two DWs to copy */
    blt     cr6,L(du2_fini)  /* if total DWs = 3, then bypass loop */
  /* there is a third DW to copy */
    beq     cr6,L(du2_fini)  /* if total DWs = 4, then bypass loop */
  /* copy 32 bytes at a time */
  /* calculate and store the final DW */
  /* there are at least two DWs to copy */
    blt     cr6,L(du3_fini)  /* if total DWs = 3, then bypass loop */
  /* there is a third DW to copy */
    beq     cr6,L(du3_fini)  /* if total DWs = 4, then bypass loop */
  /* copy 32 bytes at a time */
  /* calculate and store the final DW */
  /* there are at least two DWs to copy */
    blt     cr6,L(du4_fini)  /* if total DWs = 3, then bypass loop */
  /* there is a third DW to copy */
    beq     cr6,L(du4_fini)  /* if total DWs = 4, then bypass loop */
  /* copy 32 bytes at a time */
  /* calculate and store the final DW */
  /* there are at least two DWs to copy */
    blt     cr6,L(du5_fini)  /* if total DWs = 3, then bypass loop */
  /* there is a third DW to copy */
    beq     cr6,L(du5_fini)  /* if total DWs = 4, then bypass loop */
  /* copy 32 bytes at a time */
  /* calculate and store the final DW */
  /* there are at least two DWs to copy */
    blt     cr6,L(du6_fini)  /* if total DWs = 3, then bypass loop */
  /* there is a third DW to copy */
    beq     cr6,L(du6_fini)  /* if total DWs = 4, then bypass loop */
  /* copy 32 bytes at a time */
  /* calculate and store the final DW */
  /* there are at least two DWs to copy */
    blt     cr6,L(du7_fini)  /* if total DWs = 3, then bypass loop */
  /* there is a third DW to copy */
    beq     cr6,L(du7_fini)  /* if total DWs = 4, then bypass loop */
  /* copy 32 bytes at a time */
  /* calculate and store the final DW */
    bne     cr1,.L9     /* If the tail is 0 bytes we are done!  */
  /* Return original dst pointer.  */

END_GEN_TB (BP_SYM (memcpy),TB_TOCLESS)
libc_hidden_builtin_def (memcpy)