/*
 * usr.bin/sort/radixsort.c — DragonFly BSD (imported from FreeBSD head,
 * blob e1d9869d18e262601029ff147f119a6d85a5d795).
 */
1 /*-
2 * Copyright (C) 2012 Oleg Moskalenko <mom040267@gmail.com>
3 * Copyright (C) 2012 Gabor Kovesdan <gabor@FreeBSD.org>
4 * All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
 * $FreeBSD: head/usr.bin/sort/radixsort.c 281133 2015-04-06 03:02:20Z pfg $
 */
31 #include <errno.h>
32 #include <err.h>
33 #include <langinfo.h>
34 #include <math.h>
35 #if defined(SORT_THREADS)
36 #include <pthread.h>
37 #include <semaphore.h>
38 #endif
39 #include <stdlib.h>
40 #include <string.h>
41 #include <wchar.h>
42 #include <wctype.h>
43 #include <unistd.h>
45 #include "coll.h"
46 #include "radixsort.h"
/* Comparison-sort fallback used for nodes too small for another radix pass. */
#define DEFAULT_SORT_FUNC_RADIXSORT mergesort

/* Size thresholds below which comparison sorting beats further radix splitting. */
#define TINY_NODE(sl) ((sl)->tosort_num < 65)
#define SMALL_NODE(sl) ((sl)->tosort_num < 5)

/* are we sorting in reverse order ? */
static bool reverse_sort;

/* sort sub-levels array size (256 buckets, one per byte value) */
static const size_t slsz = 256 * sizeof(struct sort_level*);
/*
 * One sort level structure: the state of one radix pass at byte depth
 * "level".  Items arrive in "tosort", are distributed into 256
 * "sublevels" by byte value, or into "leaves" when their key is
 * exhausted; results are written into the shared "sorted" array
 * starting at "start_position".
 */
struct sort_level
{
	struct sort_level	**sublevels;	/* 256 child buckets (lazily created) */
	struct sort_list_item	**leaves;	/* items whose key ends at this depth */
	struct sort_list_item	**sorted;	/* shared output array */
	struct sort_list_item	**tosort;	/* items still to be distributed */
	size_t			  leaves_num;	/* used entries in leaves */
	size_t			  leaves_sz;	/* allocated entries in leaves */
	size_t			  level;	/* byte depth of this pass */
	size_t			  real_sln;	/* number of non-NULL sublevels */
	size_t			  start_position; /* write offset into sorted */
	size_t			  sln;		/* allocated sublevels count */
	size_t			  tosort_num;	/* used entries in tosort */
	size_t			  tosort_sz;	/* allocated entries in tosort */
};
/* stack of sort levels ready to be sorted (singly-linked LIFO) */
struct level_stack {
	struct level_stack	*next;
	struct sort_level	*sl;
};

/* global work stack shared by all sorting threads */
static struct level_stack *g_ls;
#if defined(SORT_THREADS)
/* mutex guarding the global stack of pending sort levels (g_ls) */
static pthread_mutex_t g_ls_mutex;

/* counter: how many items are left to be placed in their final position */
static size_t sort_left;
/* mutex guarding sort_left */
static pthread_mutex_t sort_left_mutex;

/* semaphore used to count (and join) worker threads */
static sem_t mtsem;
97 * Decrement items counter
99 static inline void
100 sort_left_dec(size_t n)
103 pthread_mutex_lock(&sort_left_mutex);
104 sort_left -= n;
105 pthread_mutex_unlock(&sort_left_mutex);
109 * Do we have something to sort ?
111 static inline bool
112 have_sort_left(void)
114 bool ret;
116 pthread_mutex_lock(&sort_left_mutex);
117 ret = (sort_left > 0);
118 pthread_mutex_unlock(&sort_left_mutex);
119 return (ret);
122 #else
124 #define sort_left_dec(n)
126 #endif /* SORT_THREADS */
129 * Push sort level to the stack
131 static inline void
132 push_ls(struct sort_level *sl)
134 struct level_stack *new_ls;
136 new_ls = sort_malloc(sizeof(struct level_stack));
137 new_ls->sl = sl;
139 #if defined(SORT_THREADS)
140 if (nthreads > 1)
141 pthread_mutex_lock(&g_ls_mutex);
142 #endif
144 new_ls->next = g_ls;
145 g_ls = new_ls;
147 #if defined(SORT_THREADS)
148 if (nthreads > 1)
149 pthread_mutex_unlock(&g_ls_mutex);
150 #endif
154 * Pop sort level from the stack (single-threaded style)
156 static inline struct sort_level*
157 pop_ls_st(void)
159 struct sort_level *sl;
161 if (g_ls) {
162 struct level_stack *saved_ls;
164 sl = g_ls->sl;
165 saved_ls = g_ls;
166 g_ls = g_ls->next;
167 sort_free(saved_ls);
168 } else
169 sl = NULL;
171 return (sl);
#if defined(SORT_THREADS)

/*
 * Pop sort level from the stack (multi-threaded style).
 * The node is freed outside the critical section; returns NULL when
 * the stack is empty.
 */
static inline struct sort_level *
pop_ls_mt(void)
{
	struct level_stack *saved_ls;
	struct sort_level *sl;

	pthread_mutex_lock(&g_ls_mutex);

	if (g_ls) {
		sl = g_ls->sl;
		saved_ls = g_ls;
		g_ls = g_ls->next;
	} else {
		sl = NULL;
		saved_ls = NULL;
	}

	pthread_mutex_unlock(&g_ls_mutex);

	sort_free(saved_ls);

	return (sl);
}

#endif /* defined(SORT_THREADS) */
205 static void
206 add_to_sublevel(struct sort_level *sl, struct sort_list_item *item, size_t indx)
208 struct sort_level *ssl;
210 ssl = sl->sublevels[indx];
212 if (ssl == NULL) {
213 ssl = sort_malloc(sizeof(struct sort_level));
214 memset(ssl, 0, sizeof(struct sort_level));
216 ssl->level = sl->level + 1;
217 sl->sublevels[indx] = ssl;
219 ++(sl->real_sln);
222 if (++(ssl->tosort_num) > ssl->tosort_sz) {
223 ssl->tosort_sz = ssl->tosort_num + 128;
224 ssl->tosort = sort_realloc(ssl->tosort,
225 sizeof(struct sort_list_item*) * (ssl->tosort_sz));
228 ssl->tosort[ssl->tosort_num - 1] = item;
231 static inline void
232 add_leaf(struct sort_level *sl, struct sort_list_item *item)
235 if (++(sl->leaves_num) > sl->leaves_sz) {
236 sl->leaves_sz = sl->leaves_num + 128;
237 sl->leaves = sort_realloc(sl->leaves,
238 (sizeof(struct sort_list_item*) * (sl->leaves_sz)));
240 sl->leaves[sl->leaves_num - 1] = item;
243 static inline int
244 get_wc_index(struct sort_list_item *sli, size_t level)
246 const struct bwstring *bws;
248 bws = sli->ka.key[0].k;
250 if ((BWSLEN(bws) > level))
251 return (unsigned char) BWS_GET(bws,level);
252 return (-1);
255 static void
256 place_item(struct sort_level *sl, size_t item)
258 struct sort_list_item *sli;
259 int c;
261 sli = sl->tosort[item];
262 c = get_wc_index(sli, sl->level);
264 if (c == -1)
265 add_leaf(sl, sli);
266 else
267 add_to_sublevel(sl, sli, c);
270 static void
271 free_sort_level(struct sort_level *sl)
274 if (sl) {
275 if (sl->leaves)
276 sort_free(sl->leaves);
278 if (sl->level > 0)
279 sort_free(sl->tosort);
281 if (sl->sublevels) {
282 struct sort_level *slc;
283 size_t sln;
285 sln = sl->sln;
287 for (size_t i = 0; i < sln; ++i) {
288 slc = sl->sublevels[i];
289 if (slc)
290 free_sort_level(slc);
293 sort_free(sl->sublevels);
296 sort_free(sl);
300 static void
301 run_sort_level_next(struct sort_level *sl)
303 struct sort_level *slc;
304 size_t i, sln, tosort_num;
306 if (sl->sublevels) {
307 sort_free(sl->sublevels);
308 sl->sublevels = NULL;
311 switch (sl->tosort_num) {
312 case 0:
313 goto end;
314 case (1):
315 sl->sorted[sl->start_position] = sl->tosort[0];
316 sort_left_dec(1);
317 goto end;
318 case (2):
319 if (list_coll_offset(&(sl->tosort[0]), &(sl->tosort[1]),
320 sl->level) > 0) {
321 sl->sorted[sl->start_position++] = sl->tosort[1];
322 sl->sorted[sl->start_position] = sl->tosort[0];
323 } else {
324 sl->sorted[sl->start_position++] = sl->tosort[0];
325 sl->sorted[sl->start_position] = sl->tosort[1];
327 sort_left_dec(2);
329 goto end;
330 default:
331 if (TINY_NODE(sl) || (sl->level > 15)) {
332 listcoll_t func;
334 func = get_list_call_func(sl->level);
336 sl->leaves = sl->tosort;
337 sl->leaves_num = sl->tosort_num;
338 sl->leaves_sz = sl->leaves_num;
339 sl->leaves = sort_realloc(sl->leaves,
340 (sizeof(struct sort_list_item *) *
341 (sl->leaves_sz)));
342 sl->tosort = NULL;
343 sl->tosort_num = 0;
344 sl->tosort_sz = 0;
345 sl->sln = 0;
346 sl->real_sln = 0;
347 if (sort_opts_vals.sflag) {
348 if (mergesort(sl->leaves, sl->leaves_num,
349 sizeof(struct sort_list_item *),
350 (int(*)(const void *, const void *)) func) == -1)
351 /* NOTREACHED */
352 err(2, "Radix sort error 3");
353 } else
354 DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
355 sizeof(struct sort_list_item *),
356 (int(*)(const void *, const void *)) func);
358 memcpy(sl->sorted + sl->start_position,
359 sl->leaves, sl->leaves_num *
360 sizeof(struct sort_list_item*));
362 sort_left_dec(sl->leaves_num);
364 goto end;
365 } else {
366 sl->tosort_sz = sl->tosort_num;
367 sl->tosort = sort_realloc(sl->tosort,
368 sizeof(struct sort_list_item*) * (sl->tosort_sz));
372 sl->sln = 256;
373 sl->sublevels = sort_malloc(slsz);
374 memset(sl->sublevels, 0, slsz);
376 sl->real_sln = 0;
378 tosort_num = sl->tosort_num;
379 for (i = 0; i < tosort_num; ++i)
380 place_item(sl, i);
382 sort_free(sl->tosort);
383 sl->tosort = NULL;
384 sl->tosort_num = 0;
385 sl->tosort_sz = 0;
387 if (sl->leaves_num > 1) {
388 if (keys_num > 1) {
389 if (sort_opts_vals.sflag) {
390 mergesort(sl->leaves, sl->leaves_num,
391 sizeof(struct sort_list_item *),
392 (int(*)(const void *, const void *)) list_coll);
393 } else {
394 DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
395 sizeof(struct sort_list_item *),
396 (int(*)(const void *, const void *)) list_coll);
398 } else if (!sort_opts_vals.sflag && sort_opts_vals.complex_sort) {
399 DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
400 sizeof(struct sort_list_item *),
401 (int(*)(const void *, const void *)) list_coll_by_str_only);
405 sl->leaves_sz = sl->leaves_num;
406 sl->leaves = sort_realloc(sl->leaves, (sizeof(struct sort_list_item *) *
407 (sl->leaves_sz)));
409 if (!reverse_sort) {
410 memcpy(sl->sorted + sl->start_position, sl->leaves,
411 sl->leaves_num * sizeof(struct sort_list_item*));
412 sl->start_position += sl->leaves_num;
413 sort_left_dec(sl->leaves_num);
415 sort_free(sl->leaves);
416 sl->leaves = NULL;
417 sl->leaves_num = 0;
418 sl->leaves_sz = 0;
420 sln = sl->sln;
422 for (i = 0; i < sln; ++i) {
423 slc = sl->sublevels[i];
425 if (slc) {
426 slc->sorted = sl->sorted;
427 slc->start_position = sl->start_position;
428 sl->start_position += slc->tosort_num;
429 if (SMALL_NODE(slc))
430 run_sort_level_next(slc);
431 else
432 push_ls(slc);
433 sl->sublevels[i] = NULL;
437 } else {
438 size_t n;
440 sln = sl->sln;
442 for (i = 0; i < sln; ++i) {
443 n = sln - i - 1;
444 slc = sl->sublevels[n];
446 if (slc) {
447 slc->sorted = sl->sorted;
448 slc->start_position = sl->start_position;
449 sl->start_position += slc->tosort_num;
450 if (SMALL_NODE(slc))
451 run_sort_level_next(slc);
452 else
453 push_ls(slc);
454 sl->sublevels[n] = NULL;
458 memcpy(sl->sorted + sl->start_position, sl->leaves,
459 sl->leaves_num * sizeof(struct sort_list_item*));
460 sort_left_dec(sl->leaves_num);
463 end:
464 free_sort_level(sl);
/*
 * Single-threaded sort cycle: drain the work stack until it is empty.
 */
static void
run_sort_cycle_st(void)
{
	struct sort_level *slc;

	for (;;) {
		slc = pop_ls_st();
		if (slc == NULL) {
			break;
		}
		run_sort_level_next(slc);
	}
}
#if defined(SORT_THREADS)

/*
 * Multi-threaded sort cycle: drain the shared work stack; when it is
 * momentarily empty but items remain unsorted (another thread may push
 * more work), yield and retry.
 */
static void
run_sort_cycle_mt(void)
{
	struct sort_level *slc;

	for (;;) {
		slc = pop_ls_mt();
		if (slc == NULL) {
			if (have_sort_left()) {
				pthread_yield();
				continue;
			}
			break;
		}
		run_sort_level_next(slc);
	}
}

/*
 * Sort cycle thread (in multi-threaded mode).  Posts to mtsem on exit
 * so run_top_sort_level() can join the workers.
 */
static void *
sort_thread(void *arg)
{

	run_sort_cycle_mt();

	sem_post(&mtsem);

	return (arg);
}

#endif /* defined(SORT_THREADS) */
523 static void
524 run_top_sort_level(struct sort_level *sl)
526 struct sort_level *slc;
528 reverse_sort = sort_opts_vals.kflag ? keys[0].sm.rflag :
529 default_sort_mods->rflag;
531 sl->start_position = 0;
532 sl->sln = 256;
533 sl->sublevels = sort_malloc(slsz);
534 memset(sl->sublevels, 0, slsz);
536 for (size_t i = 0; i < sl->tosort_num; ++i)
537 place_item(sl, i);
539 if (sl->leaves_num > 1) {
540 if (keys_num > 1) {
541 if (sort_opts_vals.sflag) {
542 mergesort(sl->leaves, sl->leaves_num,
543 sizeof(struct sort_list_item *),
544 (int(*)(const void *, const void *)) list_coll);
545 } else {
546 DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
547 sizeof(struct sort_list_item *),
548 (int(*)(const void *, const void *)) list_coll);
550 } else if (!sort_opts_vals.sflag && sort_opts_vals.complex_sort) {
551 DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
552 sizeof(struct sort_list_item *),
553 (int(*)(const void *, const void *)) list_coll_by_str_only);
557 if (!reverse_sort) {
558 memcpy(sl->tosort + sl->start_position, sl->leaves,
559 sl->leaves_num * sizeof(struct sort_list_item*));
560 sl->start_position += sl->leaves_num;
561 sort_left_dec(sl->leaves_num);
563 for (size_t i = 0; i < sl->sln; ++i) {
564 slc = sl->sublevels[i];
566 if (slc) {
567 slc->sorted = sl->tosort;
568 slc->start_position = sl->start_position;
569 sl->start_position += slc->tosort_num;
570 push_ls(slc);
571 sl->sublevels[i] = NULL;
575 } else {
576 size_t n;
578 for (size_t i = 0; i < sl->sln; ++i) {
580 n = sl->sln - i - 1;
581 slc = sl->sublevels[n];
583 if (slc) {
584 slc->sorted = sl->tosort;
585 slc->start_position = sl->start_position;
586 sl->start_position += slc->tosort_num;
587 push_ls(slc);
588 sl->sublevels[n] = NULL;
592 memcpy(sl->tosort + sl->start_position, sl->leaves,
593 sl->leaves_num * sizeof(struct sort_list_item*));
595 sort_left_dec(sl->leaves_num);
598 #if defined(SORT_THREADS)
599 if (nthreads < 2) {
600 #endif
601 run_sort_cycle_st();
602 #if defined(SORT_THREADS)
603 } else {
604 size_t i;
606 for(i = 0; i < nthreads; ++i) {
607 pthread_attr_t attr;
608 pthread_t pth;
610 pthread_attr_init(&attr);
611 pthread_attr_setdetachstate(&attr,
612 PTHREAD_DETACHED);
614 for (;;) {
615 int res = pthread_create(&pth, &attr,
616 sort_thread, NULL);
617 if (res >= 0)
618 break;
619 if (errno == EAGAIN) {
620 pthread_yield();
621 continue;
623 err(2, NULL);
626 pthread_attr_destroy(&attr);
629 for(i = 0; i < nthreads; ++i)
630 sem_wait(&mtsem);
632 #endif /* defined(SORT_THREADS) */
635 static void
636 run_sort(struct sort_list_item **base, size_t nmemb)
638 struct sort_level *sl;
640 #if defined(SORT_THREADS)
641 size_t nthreads_save = nthreads;
642 if (nmemb < MT_SORT_THRESHOLD)
643 nthreads = 1;
645 if (nthreads > 1) {
646 pthread_mutexattr_t mattr;
648 pthread_mutexattr_init(&mattr);
649 pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_ERRORCHECK);
651 pthread_mutex_init(&g_ls_mutex, &mattr);
652 pthread_mutex_init(&sort_left_mutex, &mattr);
654 pthread_mutexattr_destroy(&mattr);
656 sem_init(&mtsem, 0, 0);
659 #endif
661 sl = sort_malloc(sizeof(struct sort_level));
662 memset(sl, 0, sizeof(struct sort_level));
664 sl->tosort = base;
665 sl->tosort_num = nmemb;
666 sl->tosort_sz = nmemb;
668 #if defined(SORT_THREADS)
669 sort_left = nmemb;
670 #endif
672 run_top_sort_level(sl);
674 free_sort_level(sl);
676 #if defined(SORT_THREADS)
677 if (nthreads > 1) {
678 sem_destroy(&mtsem);
679 pthread_mutex_destroy(&g_ls_mutex);
680 pthread_mutex_destroy(&sort_left_mutex);
682 nthreads = nthreads_save;
683 #endif
/*
 * Public entry point: radix-sort the nmemb items in base in place.
 */
void
rxsort(struct sort_list_item **base, size_t nmemb)
{

	run_sort(base, nmemb);
}