[glibc.git] / string / memcmp.c
/* Copyright (C) 1991,1993,1995,1997,1998,2003,2004,2012
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Torbjorn Granlund (tege@sics.se).

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#undef __ptr_t
#define __ptr_t void *

#if defined HAVE_STRING_H || defined _LIBC
# include <string.h>
#endif
#undef memcmp

#ifdef _LIBC

# include <memcopy.h>
# include <endian.h>

# if __BYTE_ORDER == __BIG_ENDIAN
#  define WORDS_BIGENDIAN
# endif

#else /* Not in the GNU C library.  */

# include <sys/types.h>

/* Type to use for aligned memory operations.
   This should normally be the biggest type supported by a single load
   and store.  Must be an unsigned type.  */
# define op_t unsigned long int
# define OPSIZ (sizeof(op_t))

/* Threshold value for when to enter the unrolled loops.  */
# define OP_T_THRES 16

/* Type to use for unaligned operations.  */
typedef unsigned char byte;

# ifndef WORDS_BIGENDIAN
#  define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))
# else
#  define MERGE(w0, sh_1, w1, sh_2) (((w0) << (sh_1)) | ((w1) >> (sh_2)))
# endif

#endif /* In the GNU C library.  */
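/* Editor's illustration (not part of the original source), assuming a
   little-endian machine with a 4-byte op_t: for a source block that starts
   1 byte into a word, two aligned loads w0 and w1 can be combined into the
   value an unaligned load would have returned:

     sh_1 = 8 * 1 = 8,  sh_2 = 8 * 4 - 8 = 24
     MERGE (w0, 8, w1, 24) == (w0 >> 8) | (w1 << 24)

   i.e. the three usable bytes of w0 are shifted down and the first byte of
   w1 is spliced in above them.  The big-endian variant shifts in the
   opposite directions.  memcmp_not_common_alignment below computes the two
   shift counts exactly this way.  */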
#ifdef WORDS_BIGENDIAN
# define CMP_LT_OR_GT(a, b) ((a) > (b) ? 1 : -1)
#else
# define CMP_LT_OR_GT(a, b) memcmp_bytes ((a), (b))
#endif
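/* Editor's note: on a big-endian machine the most significant byte of an
   op_t is the byte with the lowest address, so an ordinary integer
   comparison of two differing words already yields the memcmp ordering and
   CMP_LT_OR_GT can simply test a > b.  On a little-endian machine the first
   differing byte in memory sits in the least significant bits, so the two
   words must be rescanned byte by byte by memcmp_bytes below.  */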
/* BE VERY CAREFUL IF YOU CHANGE THIS CODE!  */

/* The strategy of this memcmp is:

   1. Compare bytes until one of the block pointers is aligned.

   2. Compare using memcmp_common_alignment or
      memcmp_not_common_alignment, regarding the alignment of the other
      block after the initial byte operations.  The maximum number of
      full words (of type op_t) are compared in this way.

   3. Compare the few remaining bytes.  */
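/* Worked example (editor's addition, assuming a 4-byte op_t): for
   memcmp (p1, p2, 22) with p2 % 4 == 3, step 1 compares 1 byte to align p2,
   step 2 compares 21 / 4 = 5 full words (20 bytes) with one of the two
   helpers, and step 3 compares the final 21 % 4 = 1 byte.  */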
#ifndef WORDS_BIGENDIAN
/* memcmp_bytes -- Compare A and B bytewise in the byte order of the machine.
   A and B are known to be different.
   This is needed only on little-endian machines.  */

static int memcmp_bytes (op_t, op_t) __THROW;

# ifdef __GNUC__
__inline
# endif
static int
memcmp_bytes (a, b)
     op_t a, b;
{
  long int srcp1 = (long int) &a;
  long int srcp2 = (long int) &b;
  op_t a0, b0;
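  /* Editor's note: the loop below terminates because CMP_LT_OR_GT only
     calls memcmp_bytes when A and B are known to differ, so a differing
     byte is guaranteed within sizeof (op_t) bytes.  */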
  do
    {
      a0 = ((byte *) srcp1)[0];
      b0 = ((byte *) srcp2)[0];
      srcp1 += 1;
      srcp2 += 1;
    }
  while (a0 == b0);
  return a0 - b0;
}
#endif
static int memcmp_common_alignment (long, long, size_t) __THROW;

/* memcmp_common_alignment -- Compare blocks at SRCP1 and SRCP2 with LEN `op_t'
   objects (not LEN bytes!).  Both SRCP1 and SRCP2 should be aligned for
   memory operations on `op_t's.  */
static int
memcmp_common_alignment (srcp1, srcp2, len)
     long int srcp1;
     long int srcp2;
     size_t len;
{
  op_t a0, a1;
  op_t b0, b1;
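  /* Editor's note: what follows is a four-way unrolled loop entered in the
     middle, Duff's-device style.  The switch on len % 4 pre-adjusts the
     pointers and LEN so that, whatever the residue, the do-loop can always
     step by 4 * OPSIZ, and it jumps to the do1/do2/do3 label that leaves
     exactly a multiple of four words still to process.  */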
  switch (len % 4)
    {
    default: /* Avoid warning about uninitialized local variables.  */
    case 2:
      a0 = ((op_t *) srcp1)[0];
      b0 = ((op_t *) srcp2)[0];
      srcp1 -= 2 * OPSIZ;
      srcp2 -= 2 * OPSIZ;
      len += 2;
      goto do1;
    case 3:
      a1 = ((op_t *) srcp1)[0];
      b1 = ((op_t *) srcp2)[0];
      srcp1 -= OPSIZ;
      srcp2 -= OPSIZ;
      len += 1;
      goto do2;
    case 0:
      if (OP_T_THRES <= 3 * OPSIZ && len == 0)
        return 0;
      a0 = ((op_t *) srcp1)[0];
      b0 = ((op_t *) srcp2)[0];
      goto do3;
    case 1:
      a1 = ((op_t *) srcp1)[0];
      b1 = ((op_t *) srcp2)[0];
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      len -= 1;
      if (OP_T_THRES <= 3 * OPSIZ && len == 0)
        goto do0;
      /* Fall through.  */
    }

  do
    {
      a0 = ((op_t *) srcp1)[0];
      b0 = ((op_t *) srcp2)[0];
      if (a1 != b1)
        return CMP_LT_OR_GT (a1, b1);

    do3:
      a1 = ((op_t *) srcp1)[1];
      b1 = ((op_t *) srcp2)[1];
      if (a0 != b0)
        return CMP_LT_OR_GT (a0, b0);

    do2:
      a0 = ((op_t *) srcp1)[2];
      b0 = ((op_t *) srcp2)[2];
      if (a1 != b1)
        return CMP_LT_OR_GT (a1, b1);

    do1:
      a1 = ((op_t *) srcp1)[3];
      b1 = ((op_t *) srcp2)[3];
      if (a0 != b0)
        return CMP_LT_OR_GT (a0, b0);

      srcp1 += 4 * OPSIZ;
      srcp2 += 4 * OPSIZ;
      len -= 4;
    }
  while (len != 0);

  /* This is the right position for do0.  Please don't move
     it into the loop.  */
 do0:
  if (a1 != b1)
    return CMP_LT_OR_GT (a1, b1);
  return 0;
}
static int memcmp_not_common_alignment (long, long, size_t) __THROW;

/* memcmp_not_common_alignment -- Compare blocks at SRCP1 and SRCP2 with LEN
   `op_t' objects (not LEN bytes!).  SRCP2 should be aligned for memory
   operations on `op_t', but SRCP1 *should be unaligned*.  */
static int
memcmp_not_common_alignment (srcp1, srcp2, len)
     long int srcp1;
     long int srcp2;
     size_t len;
{
  op_t a0, a1, a2, a3;
  op_t b0, b1, b2, b3;
  op_t x;
  int shl, shr;
  /* Calculate how to shift a word read at the memory operation
     aligned srcp1 to make it aligned for comparison.  */

  shl = 8 * (srcp1 % OPSIZ);
  shr = 8 * OPSIZ - shl;

  /* Make SRCP1 aligned by rounding it down to the beginning of the `op_t'
     it points in the middle of.  */
  srcp1 &= -OPSIZ;
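  /* Editor's note: e.g. with a 4-byte op_t and srcp1 % OPSIZ == 3, this
     gives shl == 24 and shr == 8, so MERGE keeps one byte of the word
     containing srcp1 and takes three bytes from the following word --
     exactly what an unaligned op_t load at the original srcp1 would have
     produced.  */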
  switch (len % 4)
    {
    default: /* Avoid warning about uninitialized local variables.  */
    case 2:
      a1 = ((op_t *) srcp1)[0];
      a2 = ((op_t *) srcp1)[1];
      b2 = ((op_t *) srcp2)[0];
      srcp1 -= 1 * OPSIZ;
      srcp2 -= 2 * OPSIZ;
      len += 2;
      goto do1;
    case 3:
      a0 = ((op_t *) srcp1)[0];
      a1 = ((op_t *) srcp1)[1];
      b1 = ((op_t *) srcp2)[0];
      srcp2 -= 1 * OPSIZ;
      len += 1;
      goto do2;
    case 0:
      if (OP_T_THRES <= 3 * OPSIZ && len == 0)
        return 0;
      a3 = ((op_t *) srcp1)[0];
      a0 = ((op_t *) srcp1)[1];
      b0 = ((op_t *) srcp2)[0];
      srcp1 += 1 * OPSIZ;
      goto do3;
    case 1:
      a2 = ((op_t *) srcp1)[0];
      a3 = ((op_t *) srcp1)[1];
      b3 = ((op_t *) srcp2)[0];
      srcp1 += 2 * OPSIZ;
      srcp2 += 1 * OPSIZ;
      len -= 1;
      if (OP_T_THRES <= 3 * OPSIZ && len == 0)
        goto do0;
      /* Fall through.  */
    }

  do
    {
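      /* Editor's note: the loop is software-pipelined -- each step loads
         the next aligned word from SRCP1 before it is needed, so every
         MERGE combines two words that are already in registers.  Each
         aligned word of SRCP1 is loaded exactly once and reused for the
         two merges it contributes to, despite the misalignment.  */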
      a0 = ((op_t *) srcp1)[0];
      b0 = ((op_t *) srcp2)[0];
      x = MERGE(a2, shl, a3, shr);
      if (x != b3)
        return CMP_LT_OR_GT (x, b3);

    do3:
      a1 = ((op_t *) srcp1)[1];
      b1 = ((op_t *) srcp2)[1];
      x = MERGE(a3, shl, a0, shr);
      if (x != b0)
        return CMP_LT_OR_GT (x, b0);

    do2:
      a2 = ((op_t *) srcp1)[2];
      b2 = ((op_t *) srcp2)[2];
      x = MERGE(a0, shl, a1, shr);
      if (x != b1)
        return CMP_LT_OR_GT (x, b1);

    do1:
      a3 = ((op_t *) srcp1)[3];
      b3 = ((op_t *) srcp2)[3];
      x = MERGE(a1, shl, a2, shr);
      if (x != b2)
        return CMP_LT_OR_GT (x, b2);

      srcp1 += 4 * OPSIZ;
      srcp2 += 4 * OPSIZ;
      len -= 4;
    }
  while (len != 0);

  /* This is the right position for do0.  Please don't move
     it into the loop.  */
 do0:
  x = MERGE(a2, shl, a3, shr);
  if (x != b3)
    return CMP_LT_OR_GT (x, b3);
  return 0;
}
int
memcmp (s1, s2, len)
     const __ptr_t s1;
     const __ptr_t s2;
     size_t len;
{
  op_t a0;
  op_t b0;
  long int srcp1 = (long int) s1;
  long int srcp2 = (long int) s2;
  op_t res;
  if (len >= OP_T_THRES)
    {
      /* There are at least some bytes to compare.  No need to test
         for LEN == 0 in this alignment loop.  */
      while (srcp2 % OPSIZ != 0)
        {
          a0 = ((byte *) srcp1)[0];
          b0 = ((byte *) srcp2)[0];
          srcp1 += 1;
          srcp2 += 1;
          res = a0 - b0;
          if (res != 0)
            return res;
          len -= 1;
        }
      /* SRCP2 is now aligned for memory operations on `op_t'.
         SRCP1 alignment determines if we can do a simple,
         aligned compare or need to shuffle bits.  */

      if (srcp1 % OPSIZ == 0)
        res = memcmp_common_alignment (srcp1, srcp2, len / OPSIZ);
      else
        res = memcmp_not_common_alignment (srcp1, srcp2, len / OPSIZ);
      if (res != 0)
        return res;
      /* Number of bytes remaining in the interval [0..OPSIZ-1].  */
      srcp1 += len & -OPSIZ;
      srcp2 += len & -OPSIZ;
      len %= OPSIZ;
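      /* Editor's note: with len == 21 and a 4-byte op_t, len & -OPSIZ is
         20, so both pointers skip the 20 bytes already compared as words
         and len drops to 1 for the byte loop below.  */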
    }

  /* There are just a few bytes to compare.  Use byte memory operations.  */
  while (len != 0)
    {
      a0 = ((byte *) srcp1)[0];
      b0 = ((byte *) srcp2)[0];
      srcp1 += 1;
      srcp2 += 1;
      res = a0 - b0;
      if (res != 0)
        return res;
      len -= 1;
    }

  return 0;
}
libc_hidden_builtin_def(memcmp)
#ifdef weak_alias
# undef bcmp
weak_alias (memcmp, bcmp)
#endif
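/* Editor's illustration (not part of the original file): callers should
   rely only on the sign of the result, not its magnitude, since the word
   paths and byte paths above return differently scaled values.  A minimal
   hosted example:

     #include <stdio.h>
     #include <string.h>

     int
     main (void)
     {
       char a[] = "abcdef";
       char b[] = "abcxef";
       int r = memcmp (a, b, sizeof a - 1);
       printf ("%d\n", r < 0 ? -1 : r > 0 ? 1 : 0);
       return 0;
     }

   This prints -1, because the first differing byte has 'c' < 'x'.  */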