1 /* Optimized memmove implementation for PowerPC64/POWER7.
2 Copyright (C) 2014-2023 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
22 /* void* [r3] memmove (void *dest [r3], const void *src [r4], size_t len [r5])
24 This optimization checks whether memory 'dest' overlaps with 'src'. If it does
25 not, then it calls an optimized memcpy (similar to memcpy for POWER7,
26 embedded here to gain some cycles).
27 If source and destination overlap, an optimized backwards memcpy is used
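/* Illustrative sketch only, not part of the build: a minimal C version of
   the dispatch described above.  The function name and the byte loops are
   hypothetical stand-ins for the embedded forward memcpy and the backward
   path below.

     #include <stddef.h>
     #include <stdint.h>

     static void *memmove_sketch (void *dest, const void *src, size_t len)
     {
       unsigned char *d = dest;
       const unsigned char *s = src;

       // Forward copy is safe unless DEST starts inside [SRC, SRC + LEN):
       // the same unsigned "dest - src < len" test the assembly performs
       // before branching to L(memmove_bwd).
       if ((uintptr_t) d - (uintptr_t) s >= len)
         for (size_t i = 0; i < len; i++)       // low to high (memcpy path)
           d[i] = s[i];
       else
         for (size_t i = len; i > 0; i--)       // high to low (backward path)
           d[i - 1] = s[i - 1];

       return dest;                             // return original DEST (r3)
     }
*/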
31 # define MEMMOVE memmove
34 ENTRY_TOCLESS (MEMMOVE, 5)
40 blt cr7,L(memmove_bwd)
44 ble cr1, L(copy_LT_32) /* If move < 32 bytes use short move
49 cmpld cr6,10,11 /* SRC and DST alignments match? */
52 bne cr6,L(copy_GE_32_unaligned)
58 /* Get the DST and SRC aligned to 8 bytes (16 for little-endian). */
86 /* Main aligned copy loop. Copies 128 bytes at a time. */
102 /* For the 2nd and subsequent iterations of this loop. */
124 bdnz L(aligned_128head)
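/* Illustrative sketch only, not part of the build: roughly what the
   128-byte loop above does per iteration.  copy_aligned_128 is a
   hypothetical name, and the 128-byte temporary stands in for the vector
   registers the assembly loads and then stores each trip.

     #include <stddef.h>
     #include <string.h>

     static void copy_aligned_128 (unsigned char *dst,
                                   const unsigned char *src, size_t len)
     {
       size_t iters = len / 128;        // loop counter driving bdnz

       for (size_t i = 0; i < iters; i++)
         {
           unsigned char tmp[128];      // stands in for the vector registers
           memcpy (tmp, src, 128);      // eight 16-byte loads
           memcpy (dst, tmp, 128);      // eight 16-byte stores
           src += 128;
           dst += 128;
         }
       // The remaining len % 128 bytes are handled by the tail code.
     }
*/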
159 4: /* Copies 4~7 bytes. */
169 /* Return original DST pointer. */
172 /* Handle copies of 0~31 bytes. */
180 /* At least 9 bytes to go. */
184 beq L(copy_LT_32_aligned)
186 /* Force 4-byte alignment for SRC. */
196 bf 31,L(end_4bytes_alignment)
203 L(end_4bytes_alignment):
207 L(copy_LT_32_aligned):
208 /* At least 6 bytes to go, and SRC is word-aligned. */
222 8: /* Copy 8 bytes. */
232 /* Copies 4~7 bytes. */
243 /* Return original DST pointer. */
247 /* Copies 2~3 bytes. */
269 /* Return original DST pointer. */
272 /* Handles copies of 0~8 bytes. */
277 /* Though we could've used ld/std here, they are still
278 slow for unaligned cases. */
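/* Illustrative sketch only, not part of the build: the same idea in C,
   moving 8 bytes as two word-sized pieces through temporaries instead of
   one doubleword load/store pair.  copy_8_words is a hypothetical name.

     #include <string.h>

     static void copy_8_words (unsigned char *dst, const unsigned char *src)
     {
       unsigned int lo, hi;             // word-sized temporaries

       memcpy (&lo, src, 4);            // two word loads...
       memcpy (&hi, src + 4, 4);
       memcpy (dst, &lo, 4);            // ...then two word stores
       memcpy (dst + 4, &hi, 4);
     }
*/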
287 /* Handle copies of 32+ bytes where DST is aligned (to quadword) but
288 SRC is not. Use aligned quadword loads from SRC, shifted to realign
289 the data, allowing for aligned DST stores. */
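/* Illustrative sketch only, not part of the build: the classic big-endian
   lvsl/vperm idiom for one 16-byte chunk.  copy_realign_16 is a
   hypothetical name; the little-endian path in the assembly builds its
   permute control differently (see the __LITTLE_ENDIAN__ blocks below).
   DST is assumed 16-byte aligned, SRC may be unaligned.

     #include <altivec.h>

     static void copy_realign_16 (unsigned char *dst, const unsigned char *src)
     {
       vector unsigned char mask = vec_lvsl (0, src);  // shift from SRC align
       vector unsigned char lo = vec_ld (0, src);      // 1st aligned quadword
       vector unsigned char hi = vec_ld (15, src);     // 2nd aligned quadword
       vector unsigned char out = vec_perm (lo, hi, mask); // realigned bytes

       vec_st (out, 0, dst);                           // aligned store
     }
*/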
291 L(copy_GE_32_unaligned):
292 clrldi 0,0,60 /* Number of bytes until the 1st r11 quadword. */
293 srdi 9,r5,4 /* Number of full quadwords remaining. */
295 beq L(copy_GE_32_unaligned_cont)
297 /* DST is not quadword aligned, get it aligned. */
302 /* Vector instructions work best when proper alignment (16 bytes)
303 is present. Move 0~15 bytes as needed to get DST quadword-aligned. */
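/* Illustrative sketch only, not part of the build: computing and copying
   the 0~15 byte head so DST reaches a 16-byte boundary.  align_dst_head is
   a hypothetical name; the assembly picks byte/halfword/word/doubleword
   moves from the bits of the head count rather than looping.

     #include <stddef.h>
     #include <stdint.h>

     static size_t align_dst_head (unsigned char **dst,
                                   const unsigned char **src, size_t *len)
     {
       size_t head = (-(uintptr_t) *dst) & 15;  // bytes to next boundary

       if (head > *len)
         head = *len;
       for (size_t i = 0; i < head; i++)        // forward, so overlap-safe here
         (*dst)[i] = (*src)[i];
       *dst += head;
       *src += head;
       *len -= head;
       return head;
     }
*/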
329 srdi 9,r5,4 /* Number of full quadwords remaining. */
331 /* The proper alignment is present; it is OK to copy the bytes now. */
332 L(copy_GE_32_unaligned_cont):
334 /* Set up two indexes to speed up the indexed vector operations. */
336 li 6,16 /* Index for 16-byte offsets. */
337 li 7,32 /* Index for 32-byte offsets. */
339 srdi 8,r5,5 /* Set up the loop counter. */
342 #ifdef __LITTLE_ENDIAN__
349 bf 31,L(setup_unaligned_loop)
351 /* Copy another 16 bytes so a multiple of 32 bytes remains for the loop. */
353 #ifdef __LITTLE_ENDIAN__
364 L(setup_unaligned_loop):
366 ble cr6,L(end_unaligned_loop)
368 /* Copy 32 bytes at a time using vector instructions. */
372 /* Note: vr6/vr10 may contain data that was already copied,
373 but in order to get proper alignment, we may have to copy
374 some portions again. This is faster than using unaligned
375 vector accesses, though. */
378 #ifdef __LITTLE_ENDIAN__
384 #ifdef __LITTLE_ENDIAN__
393 bdnz L(unaligned_loop)
398 L(end_unaligned_loop):
400 /* Check for tail bytes. */
406 /* We have 1~15 tail bytes to copy, and DST is quadword aligned. */
415 4: /* Copy 4~7 bytes. */
425 /* Return original DST pointer. */
428 /* Start of the backward memcpy implementation: the algorithm first checks
429 whether src and dest have the same alignment; if they do, it aligns both to 16
430 bytes and copies using VSX instructions.
431 If they do not, it aligns dest to 16 bytes and uses VMX (Altivec) instructions
432 to read two 16-byte chunks at a time, shift/permute the bytes read, and write
436 /* Copy is done backwards: update the pointers and check alignment. */
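/* Illustrative sketch only, not part of the build: the shape of the
   backward copy.  copy_backward_sketch is a hypothetical name, and the byte
   loop stands in for the aligned VSX / unaligned VMX paths described above;
   working from high addresses to low means each source byte is read before
   any store can overwrite it when DST overlaps the tail of SRC.

     #include <stddef.h>

     static void copy_backward_sketch (unsigned char *dst,
                                       const unsigned char *src, size_t len)
     {
       dst += len;              // update the pointers to one past the end...
       src += len;

       while (len-- > 0)        // ...and copy toward lower addresses
         *--dst = *--src;
     }
*/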
440 ble cr1, L(copy_LT_32_bwd) /* If move < 32 bytes use short move
443 andi. r10,r11,15 /* Check if r11 is aligned to 16 bytes */
444 clrldi r9,r4,60 /* Check if r4 is aligned to 16 bytes */
445 cmpld cr6,r10,r9 /* SRC and DST alignments match? */
447 bne cr6,L(copy_GE_32_unaligned_bwd)
448 beq L(aligned_copy_bwd)
453 /* Get the DST and SRC aligned to 16 bytes. */
481 /* Main aligned copy loop. Copies 128 bytes at a time. */
490 beq L(aligned_tail_bwd)
494 b L(aligned_128loop_bwd)
497 L(aligned_128head_bwd):
498 /* For the 2nd and subsequent iterations of this loop. */
501 L(aligned_128loop_bwd):
520 bdnz L(aligned_128head_bwd)
555 4: /* Copies 4~7 bytes. */
565 /* Return original DST pointer. */
568 /* Handle copies of 0~31 bytes. */
573 ble cr6,L(copy_LE_8_bwd)
575 /* At least 9 bytes to go. */
579 beq L(copy_LT_32_aligned_bwd)
581 /* Force 4-byte alignment for SRC. */
591 bf 31,L(end_4bytes_alignment_bwd)
598 L(end_4bytes_alignment_bwd):
602 L(copy_LT_32_aligned_bwd):
603 /* At least 6 bytes to go, and SRC is word-aligned. */
617 8: /* Copy 8 bytes. */
627 /* Copies 4~7 bytes. */
638 /* Return original DST pointer. */
642 /* Copies 2~3 bytes. */
664 /* Return original DST pointer. */
668 /* Handles copies of 0~8 bytes. */
673 /* Though we could've used ld/std here, they are still
674 slow for unaligned cases. */
682 /* Handle copies of 32+ bytes where DST is aligned (to quadword) but
683 SRC is not. Use aligned quadword loads from SRC, shifted to realign
684 the data, allowing for aligned DST stores. */
686 L(copy_GE_32_unaligned_bwd):
687 andi. r10,r11,15 /* Check alignment of DST against 16 bytes. */
688 srdi r9,r5,4 /* Number of full quadwords remaining. */
690 beq L(copy_GE_32_unaligned_cont_bwd)
692 /* DST is not quadword aligned and r10 holds the address masked to
693 compare alignments. */
697 /* Vector instructions work best when proper alignment (16 bytes)
698 is present. Move 0~15 bytes as needed to get DST quadword-aligned. */
724 srdi r9,r5,4 /* Number of full quadwords remaining. */
726 /* The proper alignment is present; it is OK to copy the bytes now. */
727 L(copy_GE_32_unaligned_cont_bwd):
729 /* Set up two indexes to speed up the indexed vector operations. */
731 li r6,-16 /* Index for 16-byte offsets. */
732 li r7,-32 /* Index for 32-byte offsets. */
734 srdi r8,r5,5 /* Set up the loop counter. */
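/* Illustrative sketch only, not part of the build: how the negative indexes
   drive the 32-bytes-per-iteration backward loop.  copy_bwd_32 is a
   hypothetical name; SRC and DST are assumed to point just past the data
   still to copy, the 16-byte temporary stands in for the vector registers,
   and the real loop also permutes the loaded bytes as described above.

     #include <stddef.h>
     #include <string.h>

     static void copy_bwd_32 (unsigned char *dst, const unsigned char *src,
                              size_t len)
     {
       size_t iters = len / 32;                 // loop counter from LEN >> 5

       for (size_t i = 0; i < iters; i++)
         {
           unsigned char tmp[16];
           memcpy (tmp, src - 16, 16);          // load at offset r6 = -16
           memcpy (dst - 16, tmp, 16);          // store at offset r6 = -16
           memcpy (tmp, src - 32, 16);          // load at offset r7 = -32
           memcpy (dst - 32, tmp, 16);          // store at offset r7 = -32
           src -= 32;
           dst -= 32;
         }
     }
*/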
737 #ifdef __LITTLE_ENDIAN__
744 bf 31,L(setup_unaligned_loop_bwd)
746 /* Copy another 16 bytes so a multiple of 32 bytes remains for the loop. */
748 #ifdef __LITTLE_ENDIAN__
759 L(setup_unaligned_loop_bwd):
761 ble cr6,L(end_unaligned_loop_bwd)
763 /* Copy 32 bytes at a time using vector instructions. */
765 L(unaligned_loop_bwd):
767 /* Note: vr6/vr10 may contain data that was already copied,
768 but in order to get proper alignment, we may have to copy
769 some portions again. This is faster than using unaligned
770 vector accesses, though. */
773 #ifdef __LITTLE_ENDIAN__
779 #ifdef __LITTLE_ENDIAN__
788 bdnz L(unaligned_loop_bwd)
793 L(end_unaligned_loop_bwd):
795 /* Check for tail bytes. */
801 /* We have 1~15 tail bytes to copy, and DST is quadword aligned. */
810 4: /* Copy 4~7 bytes. */
820 /* Return original DST pointer. */
822 END_GEN_TB (MEMMOVE, TB_TOCLESS)
823 libc_hidden_builtin_def (memmove)