/* string/memcmp.c -- Compare two memory blocks for equality, byte by byte.

   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Torbjorn Granlund (tege@sics.se).

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#if defined HAVE_STRING_H || defined _LIBC
# include <string.h>
#endif

/* Make sure we compare with a real function, not a macro expansion.  */
#undef memcmp

/* MEMCMP is the external name of the function; override it (e.g. to
   __memcmp_generic) when building an ifunc variant.  */
#ifndef MEMCMP
# define MEMCMP memcmp
#endif

#ifdef _LIBC

# include <memcopy.h>
# include <endian.h>

# if __BYTE_ORDER == __BIG_ENDIAN
#  define WORDS_BIGENDIAN
# endif

#else /* Not in the GNU C library.  */

# include <sys/types.h>

/* Type to use for aligned memory operations.
   This should normally be the biggest type supported by a single load
   and store.  Must be an unsigned type.  */
# define op_t unsigned long int
# define OPSIZ (sizeof (op_t))

/* Threshold value for when to enter the unrolled loops.  */
# define OP_T_THRES 16

/* Type to use for unaligned operations.  */
typedef unsigned char byte;

/* Combine two partially-overlapping words read from aligned addresses
   into the single unaligned word that spans them; the shift direction
   depends on the byte order of the machine.  */
# ifndef WORDS_BIGENDIAN
#  define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))
# else
#  define MERGE(w0, sh_1, w1, sh_2) (((w0) << (sh_1)) | ((w1) >> (sh_2)))
# endif

#endif /* In the GNU C library.  */

/* On a big-endian machine the natural integer comparison of two words
   already agrees with bytewise order; on little-endian machines the
   bytes must be re-examined one at a time (memcmp_bytes).  */
#ifdef WORDS_BIGENDIAN
# define CMP_LT_OR_GT(a, b) ((a) > (b) ? 1 : -1)
#else
# define CMP_LT_OR_GT(a, b) memcmp_bytes ((a), (b))
#endif

/* BE VERY CAREFUL IF YOU CHANGE THIS CODE!  */

/* The strategy of this memcmp is:

   1. Compare bytes until one of the block pointers is aligned.

   2. Compare using memcmp_common_alignment or
      memcmp_not_common_alignment, regarding the alignment of the other
      block after the initial byte operations.  The maximum number of
      full words (of type op_t) are compared in this way.

   3. Compare the few remaining bytes.  */
85 #ifndef WORDS_BIGENDIAN
86 /* memcmp_bytes -- Compare A and B bytewise in the byte order of the machine.
87 A and B are known to be different.
88 This is needed only on little-endian machines. */
90 static int memcmp_bytes (op_t, op_t) __THROW;
92 static int
93 memcmp_bytes (op_t a, op_t b)
95 long int srcp1 = (long int) &a;
96 long int srcp2 = (long int) &b;
97 op_t a0, b0;
101 a0 = ((byte *) srcp1)[0];
102 b0 = ((byte *) srcp2)[0];
103 srcp1 += 1;
104 srcp2 += 1;
106 while (a0 == b0);
107 return a0 - b0;
109 #endif
111 static int memcmp_common_alignment (long, long, size_t) __THROW;
113 /* memcmp_common_alignment -- Compare blocks at SRCP1 and SRCP2 with LEN `op_t'
114 objects (not LEN bytes!). Both SRCP1 and SRCP2 should be aligned for
115 memory operations on `op_t's. */
116 static int
117 memcmp_common_alignment (long int srcp1, long int srcp2, size_t len)
119 op_t a0, a1;
120 op_t b0, b1;
122 switch (len % 4)
124 default: /* Avoid warning about uninitialized local variables. */
125 case 2:
126 a0 = ((op_t *) srcp1)[0];
127 b0 = ((op_t *) srcp2)[0];
128 srcp1 -= 2 * OPSIZ;
129 srcp2 -= 2 * OPSIZ;
130 len += 2;
131 goto do1;
132 case 3:
133 a1 = ((op_t *) srcp1)[0];
134 b1 = ((op_t *) srcp2)[0];
135 srcp1 -= OPSIZ;
136 srcp2 -= OPSIZ;
137 len += 1;
138 goto do2;
139 case 0:
140 if (OP_T_THRES <= 3 * OPSIZ && len == 0)
141 return 0;
142 a0 = ((op_t *) srcp1)[0];
143 b0 = ((op_t *) srcp2)[0];
144 goto do3;
145 case 1:
146 a1 = ((op_t *) srcp1)[0];
147 b1 = ((op_t *) srcp2)[0];
148 srcp1 += OPSIZ;
149 srcp2 += OPSIZ;
150 len -= 1;
151 if (OP_T_THRES <= 3 * OPSIZ && len == 0)
152 goto do0;
153 /* Fall through. */
158 a0 = ((op_t *) srcp1)[0];
159 b0 = ((op_t *) srcp2)[0];
160 if (a1 != b1)
161 return CMP_LT_OR_GT (a1, b1);
163 do3:
164 a1 = ((op_t *) srcp1)[1];
165 b1 = ((op_t *) srcp2)[1];
166 if (a0 != b0)
167 return CMP_LT_OR_GT (a0, b0);
169 do2:
170 a0 = ((op_t *) srcp1)[2];
171 b0 = ((op_t *) srcp2)[2];
172 if (a1 != b1)
173 return CMP_LT_OR_GT (a1, b1);
175 do1:
176 a1 = ((op_t *) srcp1)[3];
177 b1 = ((op_t *) srcp2)[3];
178 if (a0 != b0)
179 return CMP_LT_OR_GT (a0, b0);
181 srcp1 += 4 * OPSIZ;
182 srcp2 += 4 * OPSIZ;
183 len -= 4;
185 while (len != 0);
187 /* This is the right position for do0. Please don't move
188 it into the loop. */
189 do0:
190 if (a1 != b1)
191 return CMP_LT_OR_GT (a1, b1);
192 return 0;
195 static int memcmp_not_common_alignment (long, long, size_t) __THROW;
197 /* memcmp_not_common_alignment -- Compare blocks at SRCP1 and SRCP2 with LEN
198 `op_t' objects (not LEN bytes!). SRCP2 should be aligned for memory
199 operations on `op_t', but SRCP1 *should be unaligned*. */
200 static int
201 memcmp_not_common_alignment (long int srcp1, long int srcp2, size_t len)
203 op_t a0, a1, a2, a3;
204 op_t b0, b1, b2, b3;
205 op_t x;
206 int shl, shr;
208 /* Calculate how to shift a word read at the memory operation
209 aligned srcp1 to make it aligned for comparison. */
211 shl = 8 * (srcp1 % OPSIZ);
212 shr = 8 * OPSIZ - shl;
214 /* Make SRCP1 aligned by rounding it down to the beginning of the `op_t'
215 it points in the middle of. */
216 srcp1 &= -OPSIZ;
218 switch (len % 4)
220 default: /* Avoid warning about uninitialized local variables. */
221 case 2:
222 a1 = ((op_t *) srcp1)[0];
223 a2 = ((op_t *) srcp1)[1];
224 b2 = ((op_t *) srcp2)[0];
225 srcp1 -= 1 * OPSIZ;
226 srcp2 -= 2 * OPSIZ;
227 len += 2;
228 goto do1;
229 case 3:
230 a0 = ((op_t *) srcp1)[0];
231 a1 = ((op_t *) srcp1)[1];
232 b1 = ((op_t *) srcp2)[0];
233 srcp2 -= 1 * OPSIZ;
234 len += 1;
235 goto do2;
236 case 0:
237 if (OP_T_THRES <= 3 * OPSIZ && len == 0)
238 return 0;
239 a3 = ((op_t *) srcp1)[0];
240 a0 = ((op_t *) srcp1)[1];
241 b0 = ((op_t *) srcp2)[0];
242 srcp1 += 1 * OPSIZ;
243 goto do3;
244 case 1:
245 a2 = ((op_t *) srcp1)[0];
246 a3 = ((op_t *) srcp1)[1];
247 b3 = ((op_t *) srcp2)[0];
248 srcp1 += 2 * OPSIZ;
249 srcp2 += 1 * OPSIZ;
250 len -= 1;
251 if (OP_T_THRES <= 3 * OPSIZ && len == 0)
252 goto do0;
253 /* Fall through. */
258 a0 = ((op_t *) srcp1)[0];
259 b0 = ((op_t *) srcp2)[0];
260 x = MERGE(a2, shl, a3, shr);
261 if (x != b3)
262 return CMP_LT_OR_GT (x, b3);
264 do3:
265 a1 = ((op_t *) srcp1)[1];
266 b1 = ((op_t *) srcp2)[1];
267 x = MERGE(a3, shl, a0, shr);
268 if (x != b0)
269 return CMP_LT_OR_GT (x, b0);
271 do2:
272 a2 = ((op_t *) srcp1)[2];
273 b2 = ((op_t *) srcp2)[2];
274 x = MERGE(a0, shl, a1, shr);
275 if (x != b1)
276 return CMP_LT_OR_GT (x, b1);
278 do1:
279 a3 = ((op_t *) srcp1)[3];
280 b3 = ((op_t *) srcp2)[3];
281 x = MERGE(a1, shl, a2, shr);
282 if (x != b2)
283 return CMP_LT_OR_GT (x, b2);
285 srcp1 += 4 * OPSIZ;
286 srcp2 += 4 * OPSIZ;
287 len -= 4;
289 while (len != 0);
291 /* This is the right position for do0. Please don't move
292 it into the loop. */
293 do0:
294 x = MERGE(a2, shl, a3, shr);
295 if (x != b3)
296 return CMP_LT_OR_GT (x, b3);
297 return 0;
301 MEMCMP (const void *s1, const void *s2, size_t len)
303 op_t a0;
304 op_t b0;
305 long int srcp1 = (long int) s1;
306 long int srcp2 = (long int) s2;
307 op_t res;
309 if (len >= OP_T_THRES)
311 /* There are at least some bytes to compare. No need to test
312 for LEN == 0 in this alignment loop. */
313 while (srcp2 % OPSIZ != 0)
315 a0 = ((byte *) srcp1)[0];
316 b0 = ((byte *) srcp2)[0];
317 srcp1 += 1;
318 srcp2 += 1;
319 res = a0 - b0;
320 if (res != 0)
321 return res;
322 len -= 1;
325 /* SRCP2 is now aligned for memory operations on `op_t'.
326 SRCP1 alignment determines if we can do a simple,
327 aligned compare or need to shuffle bits. */
329 if (srcp1 % OPSIZ == 0)
330 res = memcmp_common_alignment (srcp1, srcp2, len / OPSIZ);
331 else
332 res = memcmp_not_common_alignment (srcp1, srcp2, len / OPSIZ);
333 if (res != 0)
334 return res;
336 /* Number of bytes remaining in the interval [0..OPSIZ-1]. */
337 srcp1 += len & -OPSIZ;
338 srcp2 += len & -OPSIZ;
339 len %= OPSIZ;
342 /* There are just a few bytes to compare. Use byte memory operations. */
343 while (len != 0)
345 a0 = ((byte *) srcp1)[0];
346 b0 = ((byte *) srcp2)[0];
347 srcp1 += 1;
348 srcp2 += 1;
349 res = a0 - b0;
350 if (res != 0)
351 return res;
352 len -= 1;
355 return 0;
357 libc_hidden_builtin_def(memcmp)
358 #ifdef weak_alias
359 # undef bcmp
360 weak_alias (memcmp, bcmp)
361 #endif