/* strncat with AVX2
   Copyright (C) 2022-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <isa-level.h>

#if ISA_SHOULD_BUILD (3)
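	/* ISA level 3 corresponds to the x86-64-v3 feature set (AVX2,
	   BMI2, ...), so this variant is only compiled when the
	   configured ISA level allows it to be selected.  */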
# include "x86-avx-vecs.h"

# define STRNCAT	__strncat_avx2
# ifdef USE_AS_WCSCPY
#  define VPCMPEQ	vpcmpeqd
#  define VPMIN	vpminud
# else
#  define VPCMPEQ	vpcmpeqb
#  define VPMIN	vpminub
# endif
# include "strncpy-or-cat-overflow-def.h"

# define PAGE_SIZE	4096
# define VZERO	VMM(7)
# define VZERO_128	VMM_128(7)
	.section SECTION(.text), "ax", @progbits

ENTRY(STRNCAT)
	/* Clear the upper 32 bits.  */
	/* Filter zero length strings and very long strings.  Zero
	   length strings just return, very long strings are handled by
	   using the non-length variant {wcs|str}cat.  */
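	/* Delegating huge lengths is safe because a bound that large
	   can never be reached by an actual string, so the bounded
	   append behaves exactly like plain {wcs|str}cat; it also keeps
	   the rsi/rdx length arithmetic below from overflowing.  */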
	vpxor	%VZERO_128, %VZERO_128, %VZERO_128

# include "strcat-strlen-avx2.h.S"
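	/* The included fragment performs the strlen part of strcat: it
	   scans the destination string and leaves %rdi pointing at its
	   null terminator, where the append begins.  */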
	andl	$(PAGE_SIZE - 1), %ecx
	cmpl	$(PAGE_SIZE - VEC_SIZE), %ecx

L(page_cross_continue):
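	/* Here either src is more than VEC_SIZE bytes away from the end
	   of its page, or the page cross path has already verified that
	   reading a full VEC from src is safe.  */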
	VPCMPEQ	%VMM(0), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %ecx
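	/* Each set bit in ecx marks a byte belonging to a zero
	   character in the first VEC of src.  */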
	/* Hoist this to save code size.  */

# ifdef USE_AS_WCSCPY
	vmovd	%VMM_128(0), (%rdi)
	MOVCHAR	$0, (%rdi, %rdx)

	/* NB: make this `vmovw` if support for AVX512-FP16 is added.  */

	MOVCHAR	$0, (%rdi, %rdx)
	movl	-(4)(%rsi, %rdx), %ecx
	movl	%ecx, -(4)(%rdi, %rdx)
	MOVCHAR	$0, (%rdi, %rdx)

	VMOVU	-(16)(%rsi, %rdx), %xmm1
	VMOVU	%xmm1, -(16)(%rdi, %rdx)
	MOVCHAR	$0, (%rdi, %rdx)

	movq	-(8)(%rsi, %rdx), %rcx
	movq	%rcx, -(8)(%rdi, %rdx)
	MOVCHAR	$0, (%rdi, %rdx)
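	/* Each of the fixed-size cases above copies the tail of the
	   source with a load/store pair addressed relative to the end
	   of the copy (base + rdx), then stores the null terminator
	   explicitly at dst + len.  */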
	VMOVU	%VMM(0), (%rdi)

	/* Align rsi (src) and adjust rdx/rdi (length/dst).  */

	orq	$(VEC_SIZE - 1), %rsi
	VMOVA	0(%rsi), %VMM(1)
	VPCMPEQ	%VMM(1), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %ecx

	cmpq	$(VEC_SIZE * 2), %rdx

	jbe	L(ret_vec_x1_len)
	VMOVA	(VEC_SIZE * 1)(%rsi), %VMM(2)
	VMOVU	%VMM(1), (%rdi)
	VPCMPEQ	%VMM(2), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %ecx
	addl	$-VEC_SIZE, %edx
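	/* bzhi clears all mask bits at or above the remaining length in
	   edx, so r8d is non-zero only if a zero character falls within
	   the length bound.  */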
	bzhil	%edx, %ecx, %r8d
	VMOVU	(%rsi, %rdx), %VMM(0)
	MOVCHAR	$0, (VEC_SIZE)(%rdi, %rdx)
	VMOVU	%VMM(0), (%rdi, %rdx)
L(return_vzeroupper):
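	/* Return after clearing the upper halves of the ymm registers
	   so that later SSE code does not pay AVX/SSE transition
	   penalties.  */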
	ZERO_UPPER_VEC_REGISTERS_RETURN
	VMOVU	-(VEC_SIZE)(%rsi, %rcx), %VMM(1)
	MOVCHAR	$0, (%rdi, %rcx)
	VMOVU	%VMM(1), -VEC_SIZE(%rdi, %rcx)
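	/* subq of a negative immediate is used instead of addq: -128
	   (-(VEC_SIZE * 4)) fits in a sign-extended 8-bit immediate
	   while +128 does not, giving a shorter encoding.  */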
	subq	$-(VEC_SIZE * 4), %rsi
	VMOVA	0(%rsi), %VMM(1)
	VPCMPEQ	%VMM(1), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %ecx
	subq	$-(VEC_SIZE * 4), %rdi
	addl	$-(VEC_SIZE * 4), %edx
	cmpl	$(VEC_SIZE * 2), %edx
	/* L(ret_vec_x1) expects ecx to hold the position of the first
	   match.  */
	VMOVA	(VEC_SIZE * 1)(%rsi), %VMM(2)
	VMOVU	%VMM(1), (%rdi)

	VPCMPEQ	%VMM(2), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %ecx

	VMOVA	(VEC_SIZE * 2)(%rsi), %VMM(3)
	VMOVU	%VMM(2), (VEC_SIZE * 1)(%rdi)

	VPCMPEQ	%VMM(3), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %ecx

	/* Check if length is greater than 4x VEC.  */
	cmpq	$(VEC_SIZE * 4), %rdx

	addl	$(VEC_SIZE * -2), %edx

	jbe	L(ret_vec_x3_len)
	VMOVA	(VEC_SIZE * 3 + 0)(%rsi), %VMM(4)
	VMOVU	%VMM(3), (VEC_SIZE * 2 + 0)(%rdi)
	VPCMPEQ	%VMM(4), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %ecx
	addl	$-VEC_SIZE, %edx
	bzhil	%edx, %ecx, %r8d
	VMOVU	(VEC_SIZE * 2)(%rsi, %rdx), %VMM(0)
	MOVCHAR	$0, (VEC_SIZE * 3)(%rdi, %rdx)
	VMOVU	%VMM(0), (VEC_SIZE * 2)(%rdi, %rdx)

	VMOVU	(VEC_SIZE)(%rsi, %rcx), %VMM(0)
	MOVCHAR	$0, (VEC_SIZE * 2)(%rdi, %rcx)
	VMOVU	%VMM(0), (VEC_SIZE)(%rdi, %rcx)
	VMOVA	(VEC_SIZE * 3)(%rsi), %VMM(4)
	VMOVU	%VMM(3), (VEC_SIZE * 2)(%rdi)
	VPCMPEQ	%VMM(4), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %ecx

	VMOVU	%VMM(4), (VEC_SIZE * 3)(%rdi)
	/* Recheck length before aligning.  */
	cmpq	$(VEC_SIZE * 8), %rdx

	/* Align rsi (src) and adjust rdx/rdi (length/dst).  */
	subq	$-(VEC_SIZE * 4), %rsi
	andq	$(VEC_SIZE * -4), %rsi
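	/* rsi has been advanced by 4x VEC and rounded down to a
	   4 * VEC_SIZE boundary, so the VMOVA loads below and in the
	   main loop are aligned.  */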
	/* Do the first half of the loop ahead of time so the loop body
	   can start by storing.  */
	VMOVA	(VEC_SIZE * 0 + 0)(%rsi), %VMM(0)
	VMOVA	(VEC_SIZE * 1 + 0)(%rsi), %VMM(1)
	VMOVA	(VEC_SIZE * 2 + 0)(%rsi), %VMM(2)
	VMOVA	(VEC_SIZE * 3 + 0)(%rsi), %VMM(3)
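	/* The minimum of all four VECs contains a zero character iff
	   any of them does, so a single compare and movemask tests the
	   whole 4x VEC block for the end of the string.  */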
	VPMIN	%VMM(0), %VMM(1), %VMM(4)
	VPMIN	%VMM(2), %VMM(3), %VMM(6)
	VPMIN	%VMM(4), %VMM(6), %VMM(6)
	VPCMPEQ	%VMM(6), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %r8d
	/* Use r9 for the end of the region before handling the last
	   4x VEC.  */
	leaq	-(VEC_SIZE * 4)(%rdx), %r9
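	/* Main 4x VEC loop: store the block loaded on the previous
	   iteration, advance src/dst by 4 * VEC_SIZE, branch to
	   L(loop_last_4x_vec) once at most 4x VEC characters remain,
	   then load and test the next block for a zero character.  */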
	VMOVU	%VMM(0), (VEC_SIZE * 0 + 0)(%rdi)
	VMOVU	%VMM(1), (VEC_SIZE * 1 + 0)(%rdi)
	subq	$(VEC_SIZE * -4), %rsi
	VMOVU	%VMM(2), (VEC_SIZE * 2 + 0)(%rdi)
	VMOVU	%VMM(3), (VEC_SIZE * 3 + 0)(%rdi)

	subq	$(VEC_SIZE * -4), %rdi

	jbe	L(loop_last_4x_vec)
	VMOVA	(VEC_SIZE * 0 + 0)(%rsi), %VMM(0)
	VMOVA	(VEC_SIZE * 1 + 0)(%rsi), %VMM(1)
	VMOVA	(VEC_SIZE * 2 + 0)(%rsi), %VMM(2)
	VMOVA	(VEC_SIZE * 3 + 0)(%rsi), %VMM(3)

	VPMIN	%VMM(0), %VMM(1), %VMM(4)
	VPMIN	%VMM(2), %VMM(3), %VMM(6)
	VPMIN	%VMM(4), %VMM(6), %VMM(6)
	VPCMPEQ	%VMM(6), %VZERO, %VMM(6)

	vpmovmskb %VMM(6), %r8d
	VPCMPEQ	%VMM(0), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %ecx
	/* L(ret_vec_x1) expects ecx to hold the position of the first
	   match.  */
	VMOVU	%VMM(0), (VEC_SIZE * 0 + 0)(%rdi)

	VPCMPEQ	%VMM(1), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %ecx

	VMOVU	%VMM(1), (VEC_SIZE * 1 + 0)(%rdi)

	VPCMPEQ	%VMM(2), %VZERO, %VMM(6)
	vpmovmskb %VMM(6), %ecx

	VMOVU	%VMM(2), (VEC_SIZE * 2 + 0)(%rdi)

	VMOVU	(VEC_SIZE * 2 + CHAR_SIZE)(%rsi, %r8), %VMM(1)
	VMOVU	%VMM(1), (VEC_SIZE * 2 + CHAR_SIZE)(%rdi, %r8)
	andq	$(VEC_SIZE * -1), %r8
	VPCMPEQ	(%r8), %VZERO, %VMM(6)

	vpmovmskb %VMM(6), %ecx
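	/* Shift out the mask bits that correspond to bytes before src:
	   shrx only uses the low bits of the count, which here equal
	   src's misalignment within its VEC.  */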
	shrxl	%esi, %ecx, %ecx

	andl	$(VEC_SIZE - 1), %r8d

	jb	L(page_cross_small)
	/* Optimizing more aggressively for space as this is very cold
	   code.  This saves 2x cache lines.  */
	/* Shifting the mask left by CHAR_SIZE adds one character to the
	   zero-CHAR position computed from ecx later, which gives the
	   correct copy bounds.  NB: this can never turn a non-zero RCX
	   into zero: to be in the page cross case rsi cannot be
	   aligned, and rcx has already been right-shifted by that
	   misalignment.  */
	shll	$CHAR_SIZE, %ecx
	jz	L(page_cross_continue)
	jz	L(page_cross_setz)
# ifdef USE_AS_WCSCPY