[glibc.git] sysdeps/tile/memcmp.c
/* Copyright (C) 1991-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#undef __ptr_t
#define __ptr_t void *

#if defined HAVE_STRING_H || defined _LIBC
# include <string.h>
#endif

#undef memcmp

#ifndef MEMCMP
# define MEMCMP memcmp
#endif
#ifdef _LIBC

# include <memcopy.h>
# include <endian.h>

# if __BYTE_ORDER == __BIG_ENDIAN
#  define WORDS_BIGENDIAN
# endif

#else /* Not in the GNU C library.  */

# include <sys/types.h>

/* Type to use for aligned memory operations.
   This should normally be the biggest type supported by a single load
   and store.  Must be an unsigned type.  */
# define op_t unsigned long int
# define OPSIZ (sizeof(op_t))

/* Threshold value for when to enter the unrolled loops.  */
# define OP_T_THRES 16

/* Type to use for unaligned operations.  */
typedef unsigned char byte;

#endif /* In the GNU C library.  */
/* Provide the appropriate builtins to shift two registers based on
   the alignment of a pointer held in a third register, and to reverse
   the bytes in a word.  */
#ifdef __tilegx__
#define DBLALIGN __insn_dblalign
#define REVBYTES __insn_revbytes
#else
#define DBLALIGN __insn_dword_align
#define REVBYTES __insn_bytex
#endif

#ifdef WORDS_BIGENDIAN
# define CMP_LT_OR_GT(a, b) ((a) > (b) ? 1 : -1)
#else
# define CMP_LT_OR_GT(a, b) (REVBYTES(a) > REVBYTES(b) ? 1 : -1)
#endif
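
/* memcmp must order blocks as if they were compared byte by byte as
   unsigned chars, i.e. by whichever byte differs first in memory.
   Comparing whole words arithmetically matches that order only on a
   big-endian machine, so on little-endian machines the bytes of each
   word are reversed before the comparison.  */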
/* BE VERY CAREFUL IF YOU CHANGE THIS CODE!  */

/* The strategy of this memcmp is:

   1. Compare bytes until one of the block pointers is aligned.

   2. Compare using memcmp_common_alignment or
      memcmp_not_common_alignment, regarding the alignment of the other
      block after the initial byte operations.  The maximum number of
      full words (of type op_t) are compared in this way.

   3. Compare the few remaining bytes.  */
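
/* A worked example of the three phases (illustrative; the numbers are
   chosen here, not taken from the original source): on tilegx, where
   OPSIZ is 8, memcmp (s1, s2, 30) with S2 three bytes past an 8-byte
   boundary compares 5 single bytes, then 3 full words (24 bytes) in
   one of the helpers below, then the final remaining byte.  */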
static int memcmp_common_alignment (long, long, size_t) __THROW;

/* memcmp_common_alignment -- Compare blocks at SRCP1 and SRCP2 with LEN `op_t'
   objects (not LEN bytes!).  Both SRCP1 and SRCP2 should be aligned for
   memory operations on `op_t's.  */
static int
memcmp_common_alignment (long int srcp1, long int srcp2, size_t len)
{
  op_t a0, a1;
  op_t b0, b1;

  switch (len % 4)
    {
    default: /* Avoid warning about uninitialized local variables.  */
    case 2:
      a0 = ((op_t *) srcp1)[0];
      b0 = ((op_t *) srcp2)[0];
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      len += 2;
      goto do1;
    case 3:
      a1 = ((op_t *) srcp1)[0];
      b1 = ((op_t *) srcp2)[0];
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      len += 1;
      goto do2;
    case 0:
      if (OP_T_THRES <= 3 * OPSIZ && len == 0)
        return 0;
      a0 = ((op_t *) srcp1)[0];
      b0 = ((op_t *) srcp2)[0];
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      goto do3;
    case 1:
      a1 = ((op_t *) srcp1)[0];
      b1 = ((op_t *) srcp2)[0];
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      len -= 1;
      if (OP_T_THRES <= 3 * OPSIZ && len == 0)
        goto do0;
      /* Fall through.  */
    }
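
  /* Each step of the unrolled loop below loads the next word pair into
     one of the two register pairs (a0/b0 or a1/b1) while testing the
     pair loaded in the previous step, so loads overlap the compares.  */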
  do
    {
      a0 = ((op_t *) srcp1)[0];
      b0 = ((op_t *) srcp2)[0];
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      if (__glibc_likely (a1 != b1))
        return CMP_LT_OR_GT (a1, b1);

    do3:
      a1 = ((op_t *) srcp1)[0];
      b1 = ((op_t *) srcp2)[0];
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      if (__glibc_likely (a0 != b0))
        return CMP_LT_OR_GT (a0, b0);

    do2:
      a0 = ((op_t *) srcp1)[0];
      b0 = ((op_t *) srcp2)[0];
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      if (__glibc_likely (a1 != b1))
        return CMP_LT_OR_GT (a1, b1);

    do1:
      a1 = ((op_t *) srcp1)[0];
      b1 = ((op_t *) srcp2)[0];
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      if (__glibc_likely (a0 != b0))
        return CMP_LT_OR_GT (a0, b0);

      len -= 4;
    }
  while (len != 0);
  /* This is the right position for do0.  Please don't move
     it into the loop.  */
 do0:
  if (__glibc_likely (a1 != b1))
    return CMP_LT_OR_GT (a1, b1);
  return 0;
}
static int memcmp_not_common_alignment (long, long, size_t) __THROW;

/* memcmp_not_common_alignment -- Compare blocks at SRCP1 and SRCP2 with LEN
   `op_t' objects (not LEN bytes!).  SRCP2 should be aligned for memory
   operations on `op_t', but SRCP1 *should be unaligned*.  */
static int
memcmp_not_common_alignment (long int srcp1, long int srcp2, size_t len)
{
  void * srcp1i;
  op_t a0, a1, a2, a3;
  op_t b0, b1, b2, b3;
  op_t x;

  /* Calculate how to shift a word read at the memory operation
     aligned srcp1 to make it aligned for comparison.  */

  srcp1i = (void *) srcp1;

  /* Make SRCP1 aligned by rounding it down to the beginning of the `op_t'
     it points in the middle of.  */
  srcp1 &= -OPSIZ;
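
  /* From here on, A0..A3 hold a sliding window of aligned words read
     from the rounded-down SRCP1, and DBLALIGN combines two adjacent
     words, using the low bits of the saved pointer SRCP1I, into the
     op_t an unaligned load at the original address would return.  */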
  switch (len % 4)
    {
    default: /* Avoid warning about uninitialized local variables.  */
    case 2:
      a1 = ((op_t *) srcp1)[0];
      a2 = ((op_t *) srcp1)[1];
      b2 = ((op_t *) srcp2)[0];
      srcp1 += 2 * OPSIZ;
      srcp2 += 1 * OPSIZ;
      len += 2;
      goto do1;
    case 3:
      a0 = ((op_t *) srcp1)[0];
      a1 = ((op_t *) srcp1)[1];
      b1 = ((op_t *) srcp2)[0];
      srcp1 += 2 * OPSIZ;
      srcp2 += 1 * OPSIZ;
      len += 1;
      goto do2;
    case 0:
      if (OP_T_THRES <= 3 * OPSIZ && len == 0)
        return 0;
      a3 = ((op_t *) srcp1)[0];
      a0 = ((op_t *) srcp1)[1];
      b0 = ((op_t *) srcp2)[0];
      srcp1 += 2 * OPSIZ;
      srcp2 += 1 * OPSIZ;
      goto do3;
    case 1:
      a2 = ((op_t *) srcp1)[0];
      a3 = ((op_t *) srcp1)[1];
      b3 = ((op_t *) srcp2)[0];
      srcp1 += 2 * OPSIZ;
      srcp2 += 1 * OPSIZ;
      len -= 1;
      if (OP_T_THRES <= 3 * OPSIZ && len == 0)
        goto do0;
      /* Fall through.  */
    }
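
  /* As in memcmp_common_alignment, each step of the loop below fetches
     the next words while forming, with DBLALIGN, the unaligned SRCP1
     word from the two previous loads and testing it against the
     matching SRCP2 word.  */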
  do
    {
      a0 = ((op_t *) srcp1)[0];
      b0 = ((op_t *) srcp2)[0];
      x = DBLALIGN (a2, a3, srcp1i);
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      if (__glibc_likely (x != b3))
        return CMP_LT_OR_GT (x, b3);

    do3:
      a1 = ((op_t *) srcp1)[0];
      b1 = ((op_t *) srcp2)[0];
      x = DBLALIGN (a3, a0, srcp1i);
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      if (__glibc_likely (x != b0))
        return CMP_LT_OR_GT (x, b0);

    do2:
      a2 = ((op_t *) srcp1)[0];
      b2 = ((op_t *) srcp2)[0];
      x = DBLALIGN (a0, a1, srcp1i);
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      if (__glibc_likely (x != b1))
        return CMP_LT_OR_GT (x, b1);

    do1:
      a3 = ((op_t *) srcp1)[0];
      b3 = ((op_t *) srcp2)[0];
      x = DBLALIGN (a1, a2, srcp1i);
      srcp1 += OPSIZ;
      srcp2 += OPSIZ;
      if (__glibc_likely (x != b2))
        return CMP_LT_OR_GT (x, b2);

      len -= 4;
    }
  while (len != 0);
  /* This is the right position for do0.  Please don't move
     it into the loop.  */
 do0:
  x = DBLALIGN (a2, a3, srcp1i);
  if (__glibc_likely (x != b3))
    return CMP_LT_OR_GT (x, b3);
  return 0;
}
int
MEMCMP (const __ptr_t s1, const __ptr_t s2, size_t len)
{
  op_t a0;
  op_t b0;
  long int srcp1 = (long int) s1;
  long int srcp2 = (long int) s2;
  int res;
  if (len >= OP_T_THRES)
    {
      /* There are at least some bytes to compare.  No need to test
         for LEN == 0 in this alignment loop.  */
      while (srcp2 % OPSIZ != 0)
        {
          a0 = ((byte *) srcp1)[0];
          b0 = ((byte *) srcp2)[0];
          srcp1 += 1;
          srcp2 += 1;
          res = a0 - b0;
          if (__glibc_likely (res != 0))
            return res;
          len -= 1;
        }

      /* SRCP2 is now aligned for memory operations on `op_t'.
         SRCP1 alignment determines if we can do a simple,
         aligned compare or need to shuffle bits.  */

      if (srcp1 % OPSIZ == 0)
        res = memcmp_common_alignment (srcp1, srcp2, len / OPSIZ);
      else
        res = memcmp_not_common_alignment (srcp1, srcp2, len / OPSIZ);
      if (res != 0)
        return res;

      /* Number of bytes remaining in the interval [0..OPSIZ-1].  */
      srcp1 += len & -OPSIZ;
      srcp2 += len & -OPSIZ;
      len %= OPSIZ;
    }
  /* There are just a few bytes to compare.  Use byte memory operations.  */
  while (len != 0)
    {
      a0 = ((byte *) srcp1)[0];
      b0 = ((byte *) srcp2)[0];
      srcp1 += 1;
      srcp2 += 1;
      res = a0 - b0;
      if (__glibc_likely (res != 0))
        return res;
      len -= 1;
    }

  return 0;
}
libc_hidden_builtin_def(memcmp)
#ifdef weak_alias
# undef bcmp
weak_alias (memcmp, bcmp)
#endif
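
/* A minimal standalone sanity check for this memcmp, shown only as an
   illustration; it is not part of the original file or of the glibc
   build, and assumes a hosted toolchain for the target.  It exercises
   the equal, unequal, and unaligned-SRCP1 paths.  */
#if 0
#include <assert.h>

int
main (void)
{
  char a[32] = "abcdefghijklmnopqrstuvwxyz01234";
  char b[32] = "abcdefghijklmnopqrstuvwxyz01234";

  assert (memcmp (a, b, 32) == 0);      /* Identical blocks.  */

  b[20] = 'Z';                          /* 'Z' < 'u', so A orders after B.  */
  assert (memcmp (a, b, 32) > 0);

  assert (memcmp (a + 1, b, 16) != 0);  /* Unaligned SRCP1 path.  */
  return 0;
}
#endif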