1 /* Optimized strcmp implementation for PowerPC64/POWER9.
2 Copyright (C) 2016-2018 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
18 #ifdef __LITTLE_ENDIAN__
22 # define STRCMP strcmp
25 /* Implements the function
27 int [r3] strcmp (const char *s1 [r3], const char *s2 [r4])
29 The implementation uses unaligned doubleword access for first 32 bytes
30 as in POWER8 patch and uses vectorised loops after that. */
32 /* TODO: Change this to actual instructions when minimum binutils is upgraded
33 to 2.27. Macros are defined below for these newer instructions in order
34 to maintain compatibility. */
/* Hand-encoded POWER9 (ISA 3.0) instructions emitted as raw .long words so
   the file assembles with binutils older than 2.27:
   VCTZLSBB  - vctzlsbb: Vector Count Trailing Zero Least-Significant
               Bits Byte (GPR result r, VR source v).
   VEXTUBRX  - vextubrx: Vector Extract Unsigned Byte Right-indexed.
   VCMPNEZB  - vcmpnezb: Vector Compare Not Equal or Zero Byte.
   NOTE(review): the continuation lines of VEXTUBRX/VCMPNEZB and the middle
   of GET16BYTES are not visible in this excerpt -- verify the operand-field
   shifts against the full file before editing these encodings.  */
35 # define VCTZLSBB(r,v) .long (0x10010602 | ((r)<<(32-11)) | ((v)<<(32-21)))
37 # define VEXTUBRX(t,a,b) .long (0x1000070d \
42 # define VCMPNEZB(t,a,b) .long (0x10000507 \
47 /* Get 16 bytes for unaligned case.
48 reg1: Vector to hold next 16 bytes.
49 reg2: Address to read from.
50 reg3: Permute control vector. */
51 # define GET16BYTES(reg1, reg2, reg3) \
53 vperm v8, v2, reg1, reg3; \
54 vcmpequb. v8, v0, v8; \
63 vperm reg1, v9, reg1, reg3;
65 /* TODO: change this to .machine power9 when the minimum required binutils
   version supports it.  */
/* int strcmp (const char *s1 [r3], const char *s2 [r4]) -- result in r3.
   First 16 bytes are compared with unaligned doubleword loads; after that,
   vectorised 16-byte loops run until a mismatch or NUL byte.
   NOTE(review): many lines of this body are missing from the excerpt; the
   comments below describe only what is visible here.  */
69 ENTRY_TOCLESS (STRCMP, 4)
72 /* Check if [s1]+16 or [s2]+16 will cross a 4K page boundary using
75 (((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
77 with PAGE_SIZE being 4096 and ITER_SIZE being 16. */
/* r7/r9 are compared against 4096-16; presumably they hold the page
   offsets of s1 and s2 -- the computing instructions are not visible in
   this excerpt, confirm against the full file.  */
81 cmpldi cr7, r7, 4096-16
82 bgt cr7, L(pagecross_check)
83 cmpldi cr5, r9, 4096-16
84 bgt cr5, L(pagecross_check)
86 /* For short strings up to 16 bytes, load both s1 and s2 using
87 unaligned dwords and compare. */
93 bne cr0, L(different_nocmpb)
100 bne cr0, L(different_nocmpb)
106 /* Now it has checked for first 16 bytes. */
109 lvsr v6, 0, r4 /* Compute mask. */
115 lvsr v10, 0, r7 /* Compute mask. */
117 /* Both s1 and s2 are unaligned. */
118 GET16BYTES(v4, r7, v10)
119 GET16BYTES(v5, r4, v6)
124 /* Align s1 to qw and adjust s2 address. */
134 /* There are 2 loops depending on the input alignment.
135 Each loop gets 16 bytes from s1 and s2 and compares.
136 Loop until a mismatch or null occurs. */
/* Unrolled by four: each step fetches the next 16 bytes of s2 through the
   permute mask and branches out when cr6 reports a mismatch or NUL (the
   compare instructions setting cr6 are not visible in this excerpt).  */
139 GET16BYTES(v5, r4, v6)
143 bne cr6, L(different)
146 GET16BYTES(v5, r4, v6)
150 bne cr6, L(different)
153 GET16BYTES(v5, r4, v6)
157 bne cr6, L(different)
160 GET16BYTES(v5, r4, v6)
174 bne cr6, L(different)
181 bne cr6, L(different)
188 bne cr6, L(different)
197 /* Calculate and return the difference. */
214 rldicl r10, r10, 0, 56 /* Keep only the low-order byte of r10. */
225 bge cr7, L(pagecross)
228 /* If unaligned 16 bytes reads across a 4K page boundary, it uses
229 a simple byte by byte comparison until the page alignment for s1
238 /* Loads a byte from s1 and s2, compare if *s1 is equal to *s2
239 and if *s1 is '\0'. */
246 bne cr7, L(pagecross_ne)
247 beq cr5, L(pagecross_nullfound)
248 bdnz L(pagecross_loop) /* Decrement CTR; loop while bytes remain.  */
255 L(pagecross_retdiff):
261 L(pagecross_nullfound):
263 b L(pagecross_retdiff)
265 libc_hidden_builtin_def (strcmp)
/* Big-endian fallback: reuse the POWER8 implementation.  NOTE(review): the
   matching #else/#endif preprocessor lines are not visible in this excerpt
   -- confirm the conditional structure in the full file.  */
267 #include <sysdeps/powerpc/powerpc64/power8/strcmp.S>