MFC if_ethersubr.c rev1.77:
[dragonfly.git] / lib / libc / stdlib / merge.c
blob3a7ad9d3f23f5b86909e2143d5dd04f07b2afa46
1 /*-
2 * Copyright (c) 1992, 1993
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * Peter McIlroy.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
 * @(#)merge.c	8.2 (Berkeley) 2/14/94
 * $DragonFly: src/lib/libc/stdlib/merge.c,v 1.6 2005/11/20 12:37:48 swildner Exp $
 */

/*
 * Hybrid exponential search/linear search merge sort with hybrid
 * natural/pairwise first pass.  Requires about .3% more comparisons
 * for random data than LSMS with pairwise first pass alone.
 * It works for objects as small as two bytes.
 */
#define NATURAL			/* enable the hybrid natural-merge first pass in setup() */
#define THRESHOLD 16	/* Best choice for natural merge cut-off. */

/* #define NATURAL to get hybrid natural merge.
 * (The default is pairwise merging.)
 */
54 #include <sys/types.h>
56 #include <errno.h>
57 #include <stdlib.h>
58 #include <string.h>
/* Internal helpers, defined below mergesort(). */
static void setup(u_char *, u_char *, size_t, size_t,
		int (*)(const void *, const void *));
static void insertionsort(u_char *, size_t, size_t,
		int (*)(const void *, const void *));
#define ISIZE sizeof(int)
#define PSIZE sizeof(u_char *)

/*
 * Bulk-copy helpers.  The I* variants move int-aligned data one int at a
 * time; the C* variants move one byte at a time.  *_LIST copies until
 * src reaches last; *_ELT copies exactly i bytes.  All of them advance
 * both src and dst cursors in the caller's scope.
 */
#define ICOPY_LIST(src, dst, last)				\
	do							\
	*(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE;	\
	while(src < last)
#define ICOPY_ELT(src, dst, i)					\
	do							\
	*(int*) dst = *(int*) src, src += ISIZE, dst += ISIZE;	\
	while (i -= ISIZE)

#define CCOPY_LIST(src, dst, last)		\
	do					\
		*dst++ = *src++;		\
	while (src < last)
#define CCOPY_ELT(src, dst, i)			\
	do					\
		*dst++ = *src++;		\
	while (i -= 1)

/*
 * Find the next possible pointer head.  (Trickery for forcing an array
 * to do double duty as a linked list when objects do not align with word
 * boundaries.
 */
/* Assumption: PSIZE is a power of 2. */
#define EVAL(p) (u_char **)						\
	((u_char *)0 +							\
	    (((u_char *)p + PSIZE - 1 - (u_char *) 0) & ~(PSIZE - 1)))
/*
 * Arguments are as for qsort.
 */
98 int
99 mergesort(void *base, size_t nmemb, size_t size,
100 int (*cmp)(const void *, const void *))
102 size_t i;
103 int sense;
104 int big, iflag;
105 u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
106 u_char *list2, *list1, *p2, *p, *last, **p1;
108 if (size < PSIZE / 2) { /* Pointers must fit into 2 * size. */
109 errno = EINVAL;
110 return (-1);
113 if (nmemb == 0)
114 return (0);
117 * XXX
118 * Stupid subtraction for the Cray.
120 iflag = 0;
121 if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
122 iflag = 1;
124 if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
125 return (-1);
127 list1 = base;
128 setup(list1, list2, nmemb, size, cmp);
129 last = list2 + nmemb * size;
130 i = big = 0;
131 while (*EVAL(list2) != last) {
132 l2 = list1;
133 p1 = EVAL(list1);
134 for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
135 p2 = *EVAL(p2);
136 f1 = l2;
137 f2 = l1 = list1 + (p2 - list2);
138 if (p2 != last)
139 p2 = *EVAL(p2);
140 l2 = list1 + (p2 - list2);
141 while (f1 < l1 && f2 < l2) {
142 if ((*cmp)(f1, f2) <= 0) {
143 q = f2;
144 b = f1, t = l1;
145 sense = -1;
146 } else {
147 q = f1;
148 b = f2, t = l2;
149 sense = 0;
151 if (!big) { /* here i = 0 */
152 while ((b += size) < t && cmp(q, b) >sense)
153 if (++i == 6) {
154 big = 1;
155 goto EXPONENTIAL;
157 } else {
158 EXPONENTIAL: for (i = size; ; i <<= 1)
159 if ((p = (b + i)) >= t) {
160 if ((p = t - size) > b &&
161 (*cmp)(q, p) <= sense)
162 t = p;
163 else
164 b = p;
165 break;
166 } else if ((*cmp)(q, p) <= sense) {
167 t = p;
168 if (i == size)
169 big = 0;
170 goto FASTCASE;
171 } else
172 b = p;
173 while (t > b+size) {
174 i = (((t - b) / size) >> 1) * size;
175 if ((*cmp)(q, p = b + i) <= sense)
176 t = p;
177 else
178 b = p;
180 goto COPY;
181 FASTCASE: while (i > size)
182 if ((*cmp)(q,
183 p = b + (i >>= 1)) <= sense)
184 t = p;
185 else
186 b = p;
187 COPY: b = t;
189 i = size;
190 if (q == f1) {
191 if (iflag) {
192 ICOPY_LIST(f2, tp2, b);
193 ICOPY_ELT(f1, tp2, i);
194 } else {
195 CCOPY_LIST(f2, tp2, b);
196 CCOPY_ELT(f1, tp2, i);
198 } else {
199 if (iflag) {
200 ICOPY_LIST(f1, tp2, b);
201 ICOPY_ELT(f2, tp2, i);
202 } else {
203 CCOPY_LIST(f1, tp2, b);
204 CCOPY_ELT(f2, tp2, i);
208 if (f2 < l2) {
209 if (iflag)
210 ICOPY_LIST(f2, tp2, l2);
211 else
212 CCOPY_LIST(f2, tp2, l2);
213 } else if (f1 < l1) {
214 if (iflag)
215 ICOPY_LIST(f1, tp2, l1);
216 else
217 CCOPY_LIST(f1, tp2, l1);
219 *p1 = l2;
221 tp2 = list1; /* swap list1, list2 */
222 list1 = list2;
223 list2 = tp2;
224 last = list2 + nmemb*size;
226 if (base == list2) {
227 memmove(list2, list1, nmemb*size);
228 list2 = list1;
230 free(list2);
231 return (0);
/*
 * swap(a, b): byte-wise exchange of the two `size`-byte elements at a
 * and b.  Uses the locals s, i, tmp and size from the enclosing scope;
 * a is advanced during the copy and restored afterwards (the b
 * expression itself is not modified).
 */
#define swap(a, b) { \
		s = b; \
		i = size; \
		do { \
			tmp = *a; *a++ = *s; *s++ = tmp; \
		} while (--i); \
		a -= size; \
	}
/*
 * reverse(bot, top): reverse the order of the `size`-byte elements in
 * the range from bot to top inclusive (top points at the first byte of
 * the last element).  Uses the locals s, i, tmp, size and size2 from
 * the enclosing scope; the bot cursor is consumed.
 */
#define reverse(bot, top) { \
	s = top; \
	do { \
		i = size; \
		do { \
			tmp = *bot; *bot++ = *s; *s++ = tmp; \
		} while (--i); \
		s -= size2; \
	} while(bot < s); \
}
254 * Optional hybrid natural/pairwise first pass. Eats up list1 in runs of
255 * increasing order, list2 in a corresponding linked list. Checks for runs
256 * when THRESHOLD/2 pairs compare with same sense. (Only used when NATURAL
257 * is defined. Otherwise simple pairwise merging is used.)
259 void
260 setup(u_char *list1, u_char *list2, size_t n, size_t size,
261 int (*cmp)(const void *, const void *))
263 int i, length, size2, tmp, sense;
264 u_char *f1, *f2, *s, *l2, *last, *p2;
266 size2 = size*2;
267 if (n <= 5) {
268 insertionsort(list1, n, size, cmp);
269 *EVAL(list2) = (u_char*) list2 + n*size;
270 return;
273 * Avoid running pointers out of bounds; limit n to evens
274 * for simplicity.
276 i = 4 + (n & 1);
277 insertionsort(list1 + (n - i) * size, i, size, cmp);
278 last = list1 + size * (n - i);
279 *EVAL(list2 + (last - list1)) = list2 + n * size;
281 #ifdef NATURAL
282 p2 = list2;
283 f1 = list1;
284 sense = (cmp(f1, f1 + size) > 0);
285 for (; f1 < last; sense = !sense) {
286 length = 2;
287 /* Find pairs with same sense. */
288 for (f2 = f1 + size2; f2 < last; f2 += size2) {
289 if ((cmp(f2, f2+ size) > 0) != sense)
290 break;
291 length += 2;
293 if (length < THRESHOLD) { /* Pairwise merge */
294 do {
295 p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
296 if (sense > 0)
297 swap (f1, f1 + size);
298 } while ((f1 += size2) < f2);
299 } else { /* Natural merge */
300 l2 = f2;
301 for (f2 = f1 + size2; f2 < l2; f2 += size2) {
302 if ((cmp(f2-size, f2) > 0) != sense) {
303 p2 = *EVAL(p2) = f2 - list1 + list2;
304 if (sense > 0)
305 reverse(f1, f2-size);
306 f1 = f2;
309 if (sense > 0)
310 reverse (f1, f2-size);
311 f1 = f2;
312 if (f2 < last || cmp(f2 - size, f2) > 0)
313 p2 = *EVAL(p2) = f2 - list1 + list2;
314 else
315 p2 = *EVAL(p2) = list2 + n*size;
318 #else /* pairwise merge only. */
319 for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
320 p2 = *EVAL(p2) = p2 + size2;
321 if (cmp (f1, f1 + size) > 0)
322 swap(f1, f1 + size);
324 #endif /* NATURAL */
328 * This is to avoid out-of-bounds addresses in sorting the
329 * last 4 elements.
331 static void
332 insertionsort(u_char *a, size_t n, size_t size,
333 int (*cmp)(const void *, const void *))
335 u_char *ai, *s, *t, *u, tmp;
336 int i;
338 for (ai = a+size; --n >= 1; ai += size)
339 for (t = ai; t > a; t -= size) {
340 u = t - size;
341 if (cmp(u, t) <= 0)
342 break;
343 swap(u, t);