/* Generic optimized memcpy using SIMD.
   Copyright (C) 2012-2023 Free Software Foundation, Inc.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */
/* Assumptions:
 *
 * ARMv8-a, AArch64, Advanced SIMD, unaligned accesses.
 */

# define MEMMOVE memmove
# define MEMCPY memcpy
/* This implementation supports both memcpy and memmove and shares most code.
   It uses unaligned accesses and branchless sequences to keep the code small
   and simple and to improve performance.

   Copies are split into 3 main cases: small copies of up to 32 bytes, medium
   copies of up to 128 bytes, and large copies.  The overhead of the overlap
   check in memmove is negligible since it is only required for large copies.

   Large copies use a software pipelined loop processing 64 bytes per
   iteration.  The source pointer is 16-byte aligned to minimize unaligned
   accesses.  The loop tail is handled by always copying 64 bytes from the
   end.  */
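/* Illustrative sketch only, not part of the build: the size dispatch
   described above, expressed as C pseudocode.  copy_small, copy_medium and
   copy_large are hypothetical names for the three code paths below.

     void *memcpy_sketch (void *dst, const void *src, size_t count)
     {
       if (count <= 32)
         return copy_small (dst, src, count);    // 0..32 bytes
       if (count <= 128)
         return copy_medium (dst, src, count);   // 33..128 bytes
       return copy_large (dst, src, count);      // pipelined 64-byte loop
     }

   memmove shares this structure and only adds an overlap check on the
   large-copy path.  */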
	add	srcend, src, count
	add	dstend, dstin, count
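	/* srcend and dstend point one byte past the end of the source and
	   destination; negative offsets from them address the tail of the
	   copy.  */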

	/* Small copies: 0..32 bytes.  */
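	/* Copies of 16..32 bytes load the first and last 16 bytes of the
	   source; the two blocks overlap whenever the count is below 32, so
	   no branch on the exact length is needed.  Shorter copies branch to
	   the cases below.  */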
	ldr	B_q, [srcend, -16]
	str	B_q, [dstend, -16]

	/* Copy 8-15 bytes.  */
	tbz	count, 3, L(copy8)

	/* Copy 4-7 bytes.  */
	tbz	count, 2, L(copy4)
	ldr	B_lw, [srcend, -4]
	str	B_lw, [dstend, -4]

	/* Copy 0..3 bytes using a branchless sequence.  */
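	/* tmp1 is count / 2, so the bytes at src, src + tmp1 and srcend - 1
	   cover counts of 1, 2 and 3, overlapping harmlessly for the shorter
	   cases; a count of zero never reaches these stores.  */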
	ldrb	C_lw, [srcend, -1]
	ldrb	B_lw, [src, tmp1]
	strb	B_lw, [dstin, tmp1]
	strb	C_lw, [dstend, -1]

	/* Medium copies: 33..128 bytes.  */
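	/* 33..64 bytes are copied as two overlapping 32-byte blocks, one from
	   the start and one from the end of the buffer; 65..128 bytes
	   continue in the code below.  */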
	ldp	C_q, D_q, [srcend, -32]
	stp	A_q, B_q, [dstin]
	stp	C_q, D_q, [dstend, -32]

	/* Copy 65..128 bytes.  */
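	/* Copy 64 bytes from the start and 64 bytes from the end of the
	   buffer; for counts below 128 the two halves simply overlap in the
	   middle.  */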
	ldp	E_q, F_q, [src, 32]
	ldp	G_q, H_q, [srcend, -64]
	stp	G_q, H_q, [dstend, -64]
	stp	A_q, B_q, [dstin]
	stp	E_q, F_q, [dstin, 32]
	stp	C_q, D_q, [dstend, -32]

	/* Align loop64 below to 16 bytes.  */
	/* Copy more than 128 bytes.  */
	/* Copy 16 bytes and then align src to 16-byte alignment.  */
	add	count, count, tmp1	/* Count is now 16 too large.  */
	ldp	A_q, B_q, [src, 16]
	ldp	C_q, D_q, [src, 48]
	subs	count, count, 128 + 16	/* Test and readjust count.  */
	b.ls	L(copy64_from_end)
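	/* A_q..D_q hold the next 64 bytes of the now 16-byte-aligned source.
	   The loop below is software pipelined: each iteration stores the 64
	   bytes loaded on the previous pass while loading the next 64 and
	   advancing src and dst by 64.  Counts small enough to be finished by
	   the 64-byte tail below skip the loop entirely.  */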
	stp	A_q, B_q, [dst, 16]
	ldp	A_q, B_q, [src, 80]
	stp	C_q, D_q, [dst, 48]
	ldp	C_q, D_q, [src, 112]
	subs	count, count, 64

	/* Write the last iteration and copy 64 bytes from the end.  */
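	/* The final 64 bytes are always copied relative to srcend/dstend, so
	   they may overwrite bytes already stored by the last loop iteration
	   with the same values; the overlap is harmless and no separate
	   short-tail loop is needed.  */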
	ldp	E_q, F_q, [srcend, -64]
	stp	A_q, B_q, [dst, 16]
	ldp	A_q, B_q, [srcend, -32]
	stp	C_q, D_q, [dst, 48]
	stp	E_q, F_q, [dstend, -64]
	stp	A_q, B_q, [dstend, -32]

libc_hidden_builtin_def (MEMCPY)

	add	srcend, src, count
	add	dstend, dstin, count

	/* Small moves: 0..32 bytes.  */
	ldr	B_q, [srcend, -16]
	str	B_q, [dstend, -16]

	/* Only use backward copy if there is an overlap.  */
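	/* A forward copy is safe unless the destination starts inside the
	   source buffer, since only then would a forward copy overwrite
	   source bytes before they are read; only a genuinely overlapping
	   move falls through to the backward code below.  */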

	/* Large backwards copy for overlapping copies.
	   Copy 16 bytes and then align srcend to 16-byte alignment.  */
L(copy_long_backwards):
	ldr	D_q, [srcend, -16]
	bic	srcend, srcend, 15
	sub	count, count, tmp1
	ldp	A_q, B_q, [srcend, -32]
	str	D_q, [dstend, -16]
	ldp	C_q, D_q, [srcend, -64]
	sub	dstend, dstend, tmp1
	subs	count, count, 128
	b.ls	L(copy64_from_start)
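	/* Mirror image of the forward loop, moving from high to low
	   addresses: the stores drain the 64 bytes loaded on the previous
	   iteration while the next 64 are loaded, and the pre-indexed store
	   at [dstend, -64]! steps dstend down by 64 each pass.  */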
	str	B_q, [dstend, -16]
	str	A_q, [dstend, -32]
	ldp	A_q, B_q, [srcend, -96]
	str	D_q, [dstend, -48]
	str	C_q, [dstend, -64]!
	ldp	C_q, D_q, [srcend, -128]
	sub	srcend, srcend, 64
	subs	count, count, 64
	b.hi	L(loop64_backwards)

	/* Write the last iteration and copy 64 bytes from the start.  */
L(copy64_from_start):
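	/* The first 64 bytes of the buffer are written last, from registers
	   loaded before the stores to the start of the destination, which
	   keeps an overlapping backward move correct.  */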
	ldp	E_q, F_q, [src, 32]
	stp	A_q, B_q, [dstend, -32]
	stp	C_q, D_q, [dstend, -64]
	stp	E_q, F_q, [dstin, 32]
	stp	A_q, B_q, [dstin]

libc_hidden_builtin_def (MEMMOVE)