1 /* Optimized strncmp implementation for PowerPC64/POWER9.
2 Copyright (C) 2016-2018 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
20 /* Implements the function
22 int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t [r5] n)
24 The implementation uses unaligned doubleword access to avoid specialized
25    code paths depending on data alignment for the first 32 bytes and uses
26 vectorised loops after that. */
29 # define STRNCMP strncmp
32 /* TODO: Change this to actual instructions when minimum binutils is upgraded
33 to 2.27. Macros are defined below for these newer instructions in order
34 to maintain compatibility. */
/* vctzlsbb: POWER9 "Vector Count Trailing Zero Least-Significant Bits
   Byte", emitted as a raw .long opcode until the minimum binutils is
   2.27 (see TODO above).  Presumably r is the destination GPR and v the
   source VR, matching the vctzlsbb RT,VRB form — confirm against the
   Power ISA 3.0 encoding.  */
35 #define VCTZLSBB(r,v) .long (0x10010602 | ((r)<<(32-11)) | ((v)<<(32-21)))
37 #define VEXTUBRX(t,a,b) .long (0x1000070d \
42 #define VCMPNEZB(t,a,b) .long (0x10000507 \
47 /* Get 16 bytes for unaligned case.
48 reg1: Vector to hold next 16 bytes.
49 reg2: Address to read from.
50 reg3: Permute control vector. */
51 #define GET16BYTES(reg1, reg2, reg3) \
53 vperm v8, v2, reg1, reg3; \
54 vcmpequb. v8, v0, v8; \
65 vperm reg1, v9, reg1, reg3;
67 /* TODO: change this to .machine power9 when minimum binutils
68 is upgraded to 2.27. */
70 ENTRY_TOCLESS (STRNCMP, 4)
71 /* Check if size is 0. */
76 /* Check if [s1]+32 or [s2]+32 will cross a 4K page boundary using
79 (((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
81      with PAGE_SIZE being 4096 and ITER_SIZE being 32.  */
83 cmpldi cr7, r8, 4096-32
86 cmpldi cr7, r9, 4096-32
89 /* For short strings up to 32 bytes, load both s1 and s2 using
90 unaligned dwords and compare. */
98 bne cr0, L(different1)
100 /* If the strings compared are equal, but size is less or equal
112 bne cr0, L(different1)
116 /* Update pointers and size. */
127 bne cr0, L(different1)
138 bne cr0, L(different1)
143 /* Update pointers and size. */
148 /* Now it has checked for first 32 bytes, align source1 to doubleword
149 and adjust source2 address. */
155 lvsr v6, 0, r4 /* Compute mask. */
160 /* Both s1 and s2 are unaligned. */
161 GET16BYTES(v5, r4, v6)
162 lvsr v10, 0, r3 /* Compute mask. */
165 GET16BYTES(v4, r3, v10)
170 /* Align s1 to qw and adjust s2 address. */
183 /* There are 2 loops depending on the input alignment.
184 Each loop gets 16 bytes from s1 and s2, checks for null
185 and compares them. Loops until a mismatch or null occurs. */
188 GET16BYTES(v5, r4, v6)
190 bne cr6, L(different)
198 GET16BYTES(v5, r4, v6)
200 bne cr6, L(different)
208 GET16BYTES(v5, r4, v6)
210 bne cr6, L(different)
218 GET16BYTES(v5, r4, v6)
220 bne cr6, L(different)
232 bne cr6, L(different)
242 bne cr6, L(different)
252 bne cr6, L(different)
262 bne cr6, L(different)
269 /* Calculate and return the difference. */
287 /* The code now checks if r8 and r5 are different by issuing a
288 cmpb and shifts the result based on its output:
290 leadzero = (__builtin_ffsl (z1) - 1);
291 leadzero = leadzero > (n-1)*8 ? (n-1)*8 : leadzero;
292 r1 = (r1 >> leadzero) & 0xFFUL;
293 r2 = (r2 >> leadzero) & 0xFFUL;
306 ble cr7, L(different2)
319 /* If unaligned 16 bytes reads across a 4K page boundary, it uses
320    a simple byte-by-byte comparison until the page alignment for s1
328 bne cr7, L(byte_ne_3)
330 beq cr7, L(byte_ne_0)
346 bne cr7, L(byte_ne_2)
347 beq cr5, L(byte_ne_0)
352 bdnz L(pagecross_loop0)
375 libc_hidden_builtin_def(strncmp)