/* strcmp implementation for ARMv7-A, optimized for Cortex-A15.
   Copyright (C) 2012-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */
#include <arm-features.h>
#include <sysdep.h>
/* Implementation of strcmp for ARMv7 when DSP instructions are
   available.  Use ldrd to support wider loads, provided the data
   is sufficiently aligned.  Use saturating arithmetic to optimize
   the compares.  */

/* Build Options:
   STRCMP_PRECHECK: Run a quick pre-check of the first byte in the
   string.  If comparing completely random strings the pre-check will
   save time, since there is a very high probability of a mismatch in
   the first character: we save significant overhead if this is the
   common case.  However, if strings are likely to be identical (e.g.
   because we're verifying a hit in a hash table), then this check
   is largely redundant.  */

#define STRCMP_PRECHECK	1

	.syntax unified
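
/* Illustrative C model of the pre-check (an editorial sketch, not part
   of the original build; my_strcmp and strcmp_rest are hypothetical
   names):

     static int strcmp_rest (const char *, const char *);

     int my_strcmp (const char *p1, const char *p2)
     {
       unsigned char c1 = *(const unsigned char *) p1;
       unsigned char c2 = *(const unsigned char *) p2;
       if (c1 == 0 || c1 != c2)		// NUL or mismatch: answer known
         return c1 - c2;
       return strcmp_rest (p1, p2);	// fall into the word loops below
     }
*/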
#ifdef __ARM_BIG_ENDIAN
# define S2LO lsl
# define S2LOEQ lsleq
# define S2HI lsr
# define MSB 0x000000ff
# define LSB 0xff000000
# define BYTE0_OFFSET 24
# define BYTE1_OFFSET 16
# define BYTE2_OFFSET 8
# define BYTE3_OFFSET 0
#else /* not __ARM_BIG_ENDIAN */
# define S2LO lsr
# define S2LOEQ lsreq
# define S2HI lsl
# define BYTE0_OFFSET 0
# define BYTE1_OFFSET 8
# define BYTE2_OFFSET 16
# define BYTE3_OFFSET 24
# define MSB 0xff000000
# define LSB 0x000000ff
#endif /* not __ARM_BIG_ENDIAN */
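
/* Editorial note: as used here, BYTEn_OFFSET is the bit offset of the
   byte at memory offset n within an aligned 32-bit load, and MSB/LSB
   mask the last/first byte in memory order.  A C sketch of the
   equivalence (byte_at is a hypothetical helper):

     #include <stdint.h>

     static inline uint8_t byte_at (uint32_t word, unsigned n, int big_endian)
     {
       unsigned shift = big_endian ? 24 - 8 * n : 8 * n;	// BYTEn_OFFSET
       return (uint8_t) (word >> shift);
     }
*/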
/* Parameters and result.  */
#define src1		r0
#define src2		r1
#define result		r0	/* Overlaps src1.  */

/* Internal variables.  */
#define tmp1		r4
#define tmp2		r5
#define const_m1	r12

/* Additional internal variables for 64-bit aligned data.  */
#define data1a		r2
#define data1b		r3
#define data2a		r6
#define data2b		r7
#define syndrome_a	tmp1
#define syndrome_b	tmp2

/* Additional internal variables for 32-bit aligned data.  */
#define data1		r2
#define data2		r3
#define syndrome	tmp2
	.thumb

/* In Thumb code we can't use MVN with a register shift, but we do have ORN.  */
.macro prepare_mask mask_reg, nbits_reg
	S2HI	\mask_reg, const_m1, \nbits_reg
.endm
.macro apply_mask data_reg, mask_reg
	orn	\data_reg, \data_reg, \mask_reg
.endm
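
/* Editorial sketch of what the two macros above achieve (little-endian
   C model; mask_leading_bytes is a hypothetical name): after aligning a
   pointer down by m bytes, the m unwanted bytes that precede the real
   string are forced to 0xff in both loaded words, so they compare equal
   and can never look like a terminating NUL.

     #include <stdint.h>

     static inline uint32_t mask_leading_bytes (uint32_t word, unsigned m)
     {
       uint32_t keep = ~0u << (8 * m);	// prepare_mask: S2HI of all-ones
       return word | ~keep;		// apply_mask: ORN
     }
*/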
/* Macro to compute and return the result value for word-aligned
   cases.  */
	.macro strcmp_epilogue_aligned synd d1 d2 restore_r6
#ifdef __ARM_BIG_ENDIAN
	/* If data1 contains a zero byte, then syndrome will contain a 1 in
	   bit 7 of that byte.  Otherwise, the highest set bit in the
	   syndrome will highlight the first different bit.  It is therefore
	   sufficient to extract the eight bits starting with the syndrome
	   bit.  */
	clz	tmp1, \synd
	lsl	r1, \d2, tmp1
	.if \restore_r6
	ldrd	r6, r7, [sp, #8]
	.endif
	cfi_restore (r6)
	cfi_restore (r7)
	lsl	\d1, \d1, tmp1
	cfi_remember_state
	lsr	result, \d1, #24
	ldrd	r4, r5, [sp], #16
	cfi_restore (r4)
	cfi_restore (r5)
	cfi_def_cfa_offset (0)
	sub	result, result, r1, lsr #24
	bx	lr
#else
	/* To use the big-endian trick we'd have to reverse all three words;
	   that's slower than this approach.  */
	rev	\synd, \synd
	clz	tmp1, \synd
	bic	tmp1, tmp1, #7
	lsr	r1, \d2, tmp1
	cfi_remember_state
	.if \restore_r6
	ldrd	r6, r7, [sp, #8]
	.endif
	cfi_restore (r6)
	cfi_restore (r7)
	lsr	\d1, \d1, tmp1
	and	result, \d1, #255
	ldrd	r4, r5, [sp], #16
	cfi_restore (r4)
	cfi_restore (r5)
	cfi_def_cfa_offset (0)
	and	r1, r1, #255
	sub	result, result, r1
	bx	lr
#endif
	.endm
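
/* Editorial sketch of the little-endian epilogue above (hypothetical C
   model; assumes synd != 0 and GCC builtins).  The syndrome holds 0xff
   in the first byte, in memory order, that differs or is NUL in data1;
   rev+clz locate it, and shifting both data words right by the same
   amount drops the equal bytes below it.

     #include <stdint.h>

     static inline int epilogue_result (uint32_t synd, uint32_t d1, uint32_t d2)
     {
       unsigned sh = __builtin_clz (__builtin_bswap32 (synd)) & ~7u;
       return (int) ((d1 >> sh) & 0xff) - (int) ((d2 >> sh) & 0xff);
     }
*/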
	.text
	.p2align	5
#if STRCMP_PRECHECK == 1
.Lfastpath_exit:
	sub	r0, r2, r3
	bx	lr
	nop
#endif
ENTRY (strcmp)
#if STRCMP_PRECHECK == 1
	ldrb	r2, [src1]
	ldrb	r3, [src2]
	cmp	r2, #1
	it	cs
	cmpcs	r2, r3
	bne	.Lfastpath_exit
#endif
	strd	r4, r5, [sp, #-16]!
	cfi_def_cfa_offset (16)
	cfi_offset (r4, -16)
	cfi_offset (r5, -12)
	orr	tmp1, src1, src2
	strd	r6, r7, [sp, #8]
	cfi_offset (r6, -8)
	cfi_offset (r7, -4)
	mvn	const_m1, #0
	lsl	r2, tmp1, #29
	cbz	r2, .Lloop_aligned8
	eor	tmp1, src1, src2
	tst	tmp1, #7
	bne	.Lmisaligned8

	/* Deal with mutual misalignment by aligning downwards and then
	   masking off the unwanted loaded data to prevent a difference.  */
	and	tmp1, src1, #7
	bic	src1, src1, #7
	and	tmp2, tmp1, #3
	bic	src2, src2, #7
	lsl	tmp2, tmp2, #3	/* Bytes -> bits.  */
	ldrd	data1a, data1b, [src1], #16
	tst	tmp1, #4
	ldrd	data2a, data2b, [src2], #16
	prepare_mask tmp1, tmp2
	apply_mask data1a, tmp1
	apply_mask data2a, tmp1
	beq	.Lstart_realigned8
	apply_mask data1b, tmp1
	mov	data1a, const_m1
	apply_mask data2b, tmp1
	mov	data2a, const_m1
	b	.Lstart_realigned8
	/* Unwind the inner loop by a factor of 2, giving 16 bytes per
	   pass.  */
	.p2align 5,,12  /* Don't start in the tail bytes of a cache line.  */
	.p2align 2	/* Always word aligned.  */
.Lloop_aligned8:
	ldrd	data1a, data1b, [src1], #16
	ldrd	data2a, data2b, [src2], #16
.Lstart_realigned8:
	uadd8	syndrome_b, data1a, const_m1	/* Only want GE bits.  */
	eor	syndrome_a, data1a, data2a
	sel	syndrome_a, syndrome_a, const_m1
	cbnz	syndrome_a, .Ldiff_in_a
	uadd8	syndrome_b, data1b, const_m1	/* Only want GE bits.  */
	eor	syndrome_b, data1b, data2b
	sel	syndrome_b, syndrome_b, const_m1
	cbnz	syndrome_b, .Ldiff_in_b

	ldrd	data1a, data1b, [src1, #-8]
	ldrd	data2a, data2b, [src2, #-8]
	uadd8	syndrome_b, data1a, const_m1	/* Only want GE bits.  */
	eor	syndrome_a, data1a, data2a
	sel	syndrome_a, syndrome_a, const_m1
	uadd8	syndrome_b, data1b, const_m1	/* Only want GE bits.  */
	eor	syndrome_b, data1b, data2b
	sel	syndrome_b, syndrome_b, const_m1
	/* Can't use CBZ for a backwards branch.  */
	orrs	syndrome_b, syndrome_b, syndrome_a /* Only need if s_a == 0.  */
	beq	.Lloop_aligned8

.Ldiff_found:
	cbnz	syndrome_a, .Ldiff_in_a

.Ldiff_in_b:
	strcmp_epilogue_aligned syndrome_b, data1b, data2b, 1

.Ldiff_in_a:
	cfi_restore_state
	strcmp_epilogue_aligned syndrome_a, data1a, data2a, 1
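
/* Editorial sketch of the uadd8/sel idiom used in the loop above (C
   model; strcmp_syndrome is a hypothetical name).  uadd8 adds 0xff to
   each byte lane and sets that lane's GE flag iff the addition carried,
   i.e. iff the byte was nonzero; sel then picks the XOR difference for
   nonzero bytes and 0xff for NUL bytes.

     #include <stdint.h>

     static inline uint32_t strcmp_syndrome (uint32_t d1, uint32_t d2)
     {
       uint32_t synd = 0;
       for (int i = 0; i < 4; i++)
         {
           uint8_t b1 = (uint8_t) (d1 >> (8 * i));
           uint8_t b2 = (uint8_t) (d2 >> (8 * i));
           uint8_t lane = b1 != 0 ? (uint8_t) (b1 ^ b2) : 0xff;	// GE + sel
           synd |= (uint32_t) lane << (8 * i);
         }
       return synd;	// nonzero iff a difference or a NUL was seen
     }
*/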
	cfi_restore_state
.Lmisaligned8:
	tst	tmp1, #3
	bne	.Lmisaligned4
	ands	tmp1, src1, #3
	bne	.Lmutual_align4

	/* Unrolled by a factor of 2, to reduce the number of post-increment
	   operations.  */
.Lloop_aligned4:
	ldr	data1, [src1], #8
	ldr	data2, [src2], #8
.Lstart_realigned4:
	uadd8	syndrome, data1, const_m1	/* Only need GE bits.  */
	eor	syndrome, data1, data2
	sel	syndrome, syndrome, const_m1
	cbnz	syndrome, .Laligned4_done
	ldr	data1, [src1, #-4]
	ldr	data2, [src2, #-4]
	uadd8	syndrome, data1, const_m1
	eor	syndrome, data1, data2
	sel	syndrome, syndrome, const_m1
	cmp	syndrome, #0
	beq	.Lloop_aligned4
.Laligned4_done:
	strcmp_epilogue_aligned syndrome, data1, data2, 0
.Lmutual_align4:
	cfi_restore_state
	/* Deal with mutual misalignment by aligning downwards and then
	   masking off the unwanted loaded data to prevent a difference.  */
	lsl	tmp1, tmp1, #3	/* Bytes -> bits.  */
	bic	src1, src1, #3
	ldr	data1, [src1], #8
	bic	src2, src2, #3
	ldr	data2, [src2], #8

	prepare_mask tmp1, tmp1
	apply_mask data1, tmp1
	apply_mask data2, tmp1
	b	.Lstart_realigned4

.Lmisaligned4:
	ands	tmp1, src1, #3
	beq	.Lsrc1_aligned
	sub	src2, src2, tmp1
	bic	src1, src1, #3
	lsls	tmp1, tmp1, #31
	ldr	data1, [src1], #4
	beq	.Laligned_m2
	bcs	.Laligned_m1
#if STRCMP_PRECHECK == 0
	ldrb	data2, [src2, #1]
	uxtb	tmp1, data1, ror #BYTE1_OFFSET
	subs	tmp1, tmp1, data2
	bne	.Lmisaligned_exit
	cbz	data2, .Lmisaligned_exit

.Laligned_m2:
	ldrb	data2, [src2, #2]
	uxtb	tmp1, data1, ror #BYTE2_OFFSET
	subs	tmp1, tmp1, data2
	bne	.Lmisaligned_exit
	cbz	data2, .Lmisaligned_exit

.Laligned_m1:
	ldrb	data2, [src2, #3]
	uxtb	tmp1, data1, ror #BYTE3_OFFSET
	subs	tmp1, tmp1, data2
	bne	.Lmisaligned_exit
	add	src2, src2, #4
	cbnz	data2, .Lsrc1_aligned
#else  /* STRCMP_PRECHECK */
	/* If we've done the pre-check, then we don't need to check the
	   first byte again here.  */
	ldrb	data2, [src2, #2]
	uxtb	tmp1, data1, ror #BYTE2_OFFSET
	subs	tmp1, tmp1, data2
	bne	.Lmisaligned_exit
	cbz	data2, .Lmisaligned_exit

.Laligned_m2:
	ldrb	data2, [src2, #3]
	uxtb	tmp1, data1, ror #BYTE3_OFFSET
	subs	tmp1, tmp1, data2
	bne	.Lmisaligned_exit
	cbnz	data2, .Laligned_m1
#endif

.Lmisaligned_exit:
	cfi_remember_state
	mov	result, tmp1
	ldr	r4, [sp], #16
	cfi_def_cfa_offset (0)
	cfi_restore (r4)
	bx	lr
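
/* Editorial sketch of one step of the byte-at-a-time recovery above (C
   model; bytes_decide is a hypothetical name).  Byte n of the aligned
   word loaded from src1 is compared against the next byte of src2; the
   strings are decided on a mismatch or a NUL, and tmp1 already holds
   the difference that .Lmisaligned_exit returns.

     #include <stdint.h>

     static inline int bytes_decide (uint32_t data1, unsigned n,
                                     uint8_t c2, int *diff)
     {
       uint8_t c1 = (uint8_t) (data1 >> (8 * n));	// uxtb+ror, little-endian
       *diff = c1 - c2;
       return c1 != c2 || c2 == 0;	// nonzero => return *diff
     }
*/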
#if STRCMP_PRECHECK == 1
.Laligned_m1:
	add	src2, src2, #4
#endif
.Lsrc1_aligned:
	cfi_restore_state
	/* src1 is word aligned, but src2 has no common alignment
	   with it.  */
	ldr	data1, [src1], #4
	lsls	tmp1, src2, #31		/* C=src2[1], Z=src2[0].  */
	bic	src2, src2, #3
	ldr	data2, [src2], #4
	bhi	.Loverlap1		/* C=1, Z=0 => src2[1:0] = 0b11.  */
	bcs	.Loverlap2		/* C=1, Z=1 => src2[1:0] = 0b10.  */
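
/* Editorial note: lsls #31 moves bit 1 of src2 into C and sets Z iff
   bit 0 is zero, so the two conditional branches dispatch on src2 & 3
   without a separate AND/CMP.  A C model (hypothetical names):

     #include <stdint.h>

     enum overlap { OVERLAP3 = 1, OVERLAP2 = 2, OVERLAP1 = 3 };

     static inline enum overlap classify_src2 (uintptr_t src2)
     {
       return (enum overlap) (src2 & 3);	// 0 is handled earlier
     }
*/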
	/* (overlap3) C=0, Z=0 => src2[1:0] = 0b01.  */
.Loverlap3:
	bic	tmp1, data1, #MSB
	uadd8	syndrome, data1, const_m1
	eors	syndrome, tmp1, data2, S2LO #8
	sel	syndrome, syndrome, const_m1
	bne	4f
	cbnz	syndrome, 5f
	ldr	data2, [src2], #4
	eor	tmp1, tmp1, data1
	cmp	tmp1, data2, S2HI #24
	bne	6f
	ldr	data1, [src1], #4
	b	.Loverlap3
4:
	S2LO	data2, data2, #8
	b	.Lstrcmp_tail
5:
	bics	syndrome, syndrome, #MSB
	bne	.Lstrcmp_done_equal

	/* We can only get here if the MSB of data1 contains 0, so
	   fast-path the exit.  */
	ldrb	result, [src2]
	ldrd	r4, r5, [sp], #16
	cfi_remember_state
	cfi_def_cfa_offset (0)
	cfi_restore (r4)
	cfi_restore (r5)
	/* R6/7 not used in this sequence.  */
	cfi_restore (r6)
	cfi_restore (r7)
	neg	result, result
	bx	lr

6:
	cfi_restore_state
	S2LO	data1, data1, #24
	and	data2, data2, #LSB
	b	.Lstrcmp_tail
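
/* Editorial sketch of the steady-state overlap3 comparison (little-endian
   C model; words_match_overlap3 is a hypothetical name).  With src2 % 4
   == 1, one aligned word of src1 must equal the top three bytes of the
   current src2 word glued to the low byte of the next one.

     #include <stdint.h>

     static inline int words_match_overlap3 (uint32_t d1, uint32_t d2_cur,
                                             uint32_t d2_next)
     {
       uint32_t s2 = (d2_cur >> 8) | (d2_next << 24);	// re-assemble src2 bytes
       return d1 == s2;
     }
*/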
	.p2align 5,,12	/* Ensure at least 3 instructions in cache line.  */
.Loverlap2:
	and	tmp1, data1, const_m1, S2LO #16
	uadd8	syndrome, data1, const_m1
	eors	syndrome, tmp1, data2, S2LO #16
	sel	syndrome, syndrome, const_m1
	bne	4f
	cbnz	syndrome, 5f
	ldr	data2, [src2], #4
	eor	tmp1, tmp1, data1
	cmp	tmp1, data2, S2HI #16
	bne	6f
	ldr	data1, [src1], #4
	b	.Loverlap2
4:
	S2LO	data2, data2, #16
	b	.Lstrcmp_tail
5:
	ands	syndrome, syndrome, const_m1, S2LO #16
	bne	.Lstrcmp_done_equal

	ldrh	data2, [src2]
	S2LO	data1, data1, #16
#ifdef __ARM_BIG_ENDIAN
	lsl	data2, data2, #16
#endif
	b	.Lstrcmp_tail

6:
	cfi_restore_state
	S2LO	data1, data1, #16
	and	data2, data2, const_m1, S2LO #16
	b	.Lstrcmp_tail
	.p2align 5,,12	/* Ensure at least 3 instructions in cache line.  */
.Loverlap1:
	and	tmp1, data1, #LSB
	uadd8	syndrome, data1, const_m1
	eors	syndrome, tmp1, data2, S2LO #24
	sel	syndrome, syndrome, const_m1
	bne	4f
	cbnz	syndrome, 5f
	ldr	data2, [src2], #4
	eor	tmp1, tmp1, data1
	cmp	tmp1, data2, S2HI #8
	bne	6f
	ldr	data1, [src1], #4
	b	.Loverlap1
4:
	S2LO	data2, data2, #24
	b	.Lstrcmp_tail
5:
	tst	syndrome, #LSB
	bne	.Lstrcmp_done_equal
	ldr	data2, [src2]
6:
	S2LO	data1, data1, #8
	bic	data2, data2, #MSB
	b	.Lstrcmp_tail

.Lstrcmp_done_equal:
	mov	result, #0
	ldrd	r4, r5, [sp], #16
	cfi_remember_state
	cfi_def_cfa_offset (0)
	cfi_restore (r4)
	cfi_restore (r5)
	/* R6/7 not used in this sequence.  */
	cfi_restore (r6)
	cfi_restore (r7)
	bx	lr
.Lstrcmp_tail:
	cfi_restore_state
#ifndef __ARM_BIG_ENDIAN
	rev	data1, data1
	rev	data2, data2
	/* Now everything looks big-endian...  */
#endif
	uadd8	tmp1, data1, const_m1
	eor	tmp1, data1, data2
	sel	syndrome, tmp1, const_m1
	clz	tmp1, syndrome
	lsl	data1, data1, tmp1
	lsl	data2, data2, tmp1
	lsr	result, data1, #24
	ldrd	r4, r5, [sp], #16
	cfi_def_cfa_offset (0)
	cfi_restore (r4)
	cfi_restore (r5)
	/* R6/7 not used in this sequence.  */
	cfi_restore (r6)
	cfi_restore (r7)
	sub	result, result, data2, lsr #24
	bx	lr
END (strcmp)
libc_hidden_builtin_def (strcmp)