/*-
 * Copyright (C) 2012 Oleg Moskalenko <mom040267@gmail.com>
 * Copyright (C) 2012 Gabor Kovesdan <gabor@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/usr.bin/sort/radixsort.c 281133 2015-04-06 03:02:20Z pfg $
 */

#include <errno.h>
#include <err.h>
#include <langinfo.h>
#include <math.h>
#if defined(SORT_THREADS)
#include <pthread.h>
#include <semaphore.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <wchar.h>
#include <wctype.h>
#include <unistd.h>

#include "coll.h"
#include "radixsort.h"

#define DEFAULT_SORT_FUNC_RADIXSORT mergesort

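/*
 * Tuning cutoffs: nodes with fewer than 65 items are finished with a
 * comparison sort instead of being bucketed further, and nodes with
 * fewer than 5 items are sorted inline instead of going through the
 * level stack.
 */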
#define TINY_NODE(sl) ((sl)->tosort_num < 65)
#define SMALL_NODE(sl) ((sl)->tosort_num < 5)

/* are we sorting in reverse order ? */
static bool reverse_sort;

/* sort sub-levels array size */
static const size_t slsz = 256 * sizeof(struct sort_level*);

/* one sort level structure */
struct sort_level
{
	struct sort_level	**sublevels;
	struct sort_list_item	**leaves;
	struct sort_list_item	**sorted;
	struct sort_list_item	**tosort;
	size_t			  leaves_num;
	size_t			  leaves_sz;
	size_t			  level;
	size_t			  real_sln;
	size_t			  start_position;
	size_t			  sln;
	size_t			  tosort_num;
	size_t			  tosort_sz;
};

/* stack of sort levels ready to be sorted */
struct level_stack {
	struct level_stack	 *next;
	struct sort_level	 *sl;
};

static struct level_stack *g_ls;

#if defined(SORT_THREADS)
/* stack guarding condition variable and mutex */
static pthread_cond_t g_ls_cond;
static pthread_mutex_t g_ls_mutex;

/* counter: how many items are left */
static size_t sort_left;

/* semaphore to count threads */
static sem_t mtsem;

/*
 * Decrement items counter
 */
static inline void
sort_left_dec(size_t n)
{
	pthread_mutex_lock(&g_ls_mutex);
	sort_left -= n;
	if (sort_left == 0 && nthreads > 1) {
		pthread_cond_broadcast(&g_ls_cond);
	}
	pthread_mutex_unlock(&g_ls_mutex);
}

/*
 * Do we have something to sort ?
 *
 * This routine does not need to be locked.
 */
static inline bool
have_sort_left(void)
{
	bool ret;

	ret = (sort_left > 0);

	return (ret);
}

#else

#define sort_left_dec(n)

#endif /* SORT_THREADS */

/*
 * Push sort level to the stack
 */
static inline void
push_ls(struct sort_level *sl)
{
	struct level_stack *new_ls;

	new_ls = sort_malloc(sizeof(struct level_stack));
	new_ls->sl = sl;

#if defined(SORT_THREADS)
	if (nthreads > 1)
		pthread_mutex_lock(&g_ls_mutex);
#endif

	new_ls->next = g_ls;
	g_ls = new_ls;

#if defined(SORT_THREADS)
	if (nthreads > 1) {
		pthread_cond_signal(&g_ls_cond);
	}
#endif

#if defined(SORT_THREADS)
	if (nthreads > 1)
		pthread_mutex_unlock(&g_ls_mutex);
#endif
}

/*
 * Pop sort level from the stack (single-threaded style)
 */
static inline struct sort_level*
pop_ls_st(void)
{
	struct sort_level *sl;

	if (g_ls) {
		struct level_stack *saved_ls;

		sl = g_ls->sl;
		saved_ls = g_ls;
		g_ls = g_ls->next;
		sort_free(saved_ls);
	} else
		sl = NULL;

	return (sl);
}

#if defined(SORT_THREADS)

/*
 * Pop sort level from the stack (multi-threaded style)
 */
static inline struct sort_level*
pop_ls_mt(void)
{
	struct level_stack *saved_ls;
	struct sort_level *sl;

	pthread_mutex_lock(&g_ls_mutex);

	for (;;) {
		if (g_ls) {
			sl = g_ls->sl;
			saved_ls = g_ls;
			g_ls = g_ls->next;
			break;
		}
		sl = NULL;
		saved_ls = NULL;

		if (have_sort_left() == 0)
			break;
		pthread_cond_wait(&g_ls_cond, &g_ls_mutex);
	}

	pthread_mutex_unlock(&g_ls_mutex);

	sort_free(saved_ls);

	return (sl);
}

#endif /* defined(SORT_THREADS) */

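/*
 * Add an item to the sub-level bucket selected by the key byte 'indx',
 * allocating the sub-level lazily on first use and growing its tosort
 * array in 128-slot steps.
 */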
static void
add_to_sublevel(struct sort_level *sl, struct sort_list_item *item, size_t indx)
{
	struct sort_level *ssl;

	ssl = sl->sublevels[indx];

	if (ssl == NULL) {
		ssl = sort_malloc(sizeof(struct sort_level));
		memset(ssl, 0, sizeof(struct sort_level));

		ssl->level = sl->level + 1;
		sl->sublevels[indx] = ssl;

		++(sl->real_sln);
	}

	if (++(ssl->tosort_num) > ssl->tosort_sz) {
		ssl->tosort_sz = ssl->tosort_num + 128;
		ssl->tosort = sort_realloc(ssl->tosort,
		    sizeof(struct sort_list_item*) * (ssl->tosort_sz));
	}

	ssl->tosort[ssl->tosort_num - 1] = item;
}

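/*
 * Append an item to this level's list of leaves, i.e. items whose key
 * has no more bytes at this depth.
 */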
static inline void
add_leaf(struct sort_level *sl, struct sort_list_item *item)
{

	if (++(sl->leaves_num) > sl->leaves_sz) {
		sl->leaves_sz = sl->leaves_num + 128;
		sl->leaves = sort_realloc(sl->leaves,
		    (sizeof(struct sort_list_item*) * (sl->leaves_sz)));
	}
	sl->leaves[sl->leaves_num - 1] = item;
}

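/*
 * Return the key byte of the item at the given depth, or -1 if the key
 * is shorter than that depth.
 */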
static inline int
get_wc_index(struct sort_list_item *sli, size_t level)
{
	const struct bwstring *bws;

	bws = sli->ka.key[0].k;

	if (BWSLEN(bws) > level)
		return (unsigned char) BWS_GET(bws, level);
	return (-1);
}

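/*
 * Route one item: keys exhausted at this depth become leaves, all others
 * go to the sub-level selected by their next key byte.
 */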
static void
place_item(struct sort_level *sl, size_t item)
{
	struct sort_list_item *sli;
	int c;

	sli = sl->tosort[item];
	c = get_wc_index(sli, sl->level);

	if (c == -1)
		add_leaf(sl, sli);
	else
		add_to_sublevel(sl, sli, c);
}

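/*
 * Recursively release a sort level together with all of its sub-levels.
 */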
static void
free_sort_level(struct sort_level *sl)
{

	if (sl) {
		if (sl->leaves)
			sort_free(sl->leaves);

		if (sl->level > 0)
			sort_free(sl->tosort);

		if (sl->sublevels) {
			struct sort_level *slc;
			size_t sln;

			sln = sl->sln;

			for (size_t i = 0; i < sln; ++i) {
				slc = sl->sublevels[i];
				if (slc)
					free_sort_level(slc);
			}

			sort_free(sl->sublevels);
		}

		sort_free(sl);
	}
}

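/*
 * Sort one level: 0-2 items are handled inline, tiny or deep nodes
 * (more than 15 key bytes consumed) fall back to a comparison sort, and
 * everything else is bucketed into 256 sub-levels, which are then either
 * sorted right away (small nodes) or pushed onto the level stack.
 */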
static void
run_sort_level_next(struct sort_level *sl)
{
	struct sort_level *slc;
	size_t i, sln, tosort_num;

	if (sl->sublevels) {
		sort_free(sl->sublevels);
		sl->sublevels = NULL;
	}

	switch (sl->tosort_num) {
	case 0:
		goto end;
	case (1):
		sl->sorted[sl->start_position] = sl->tosort[0];
		sort_left_dec(1);
		goto end;
	case (2):
		if (list_coll_offset(&(sl->tosort[0]), &(sl->tosort[1]),
		    sl->level) > 0) {
			sl->sorted[sl->start_position++] = sl->tosort[1];
			sl->sorted[sl->start_position] = sl->tosort[0];
		} else {
			sl->sorted[sl->start_position++] = sl->tosort[0];
			sl->sorted[sl->start_position] = sl->tosort[1];
		}
		sort_left_dec(2);

		goto end;
	default:
		if (TINY_NODE(sl) || (sl->level > 15)) {
			listcoll_t func;

			func = get_list_call_func(sl->level);

			sl->leaves = sl->tosort;
			sl->leaves_num = sl->tosort_num;
			sl->leaves_sz = sl->leaves_num;
			sl->leaves = sort_realloc(sl->leaves,
			    (sizeof(struct sort_list_item *) *
			    (sl->leaves_sz)));
			sl->tosort = NULL;
			sl->tosort_num = 0;
			sl->tosort_sz = 0;
			sl->sln = 0;
			sl->real_sln = 0;
			if (sort_opts_vals.sflag) {
				if (mergesort(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) func) == -1)
					/* NOTREACHED */
					err(2, "Radix sort error 3");
			} else
				DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) func);

			memcpy(sl->sorted + sl->start_position,
			    sl->leaves, sl->leaves_num *
			    sizeof(struct sort_list_item*));

			sort_left_dec(sl->leaves_num);

			goto end;
		} else {
			sl->tosort_sz = sl->tosort_num;
			sl->tosort = sort_realloc(sl->tosort,
			    sizeof(struct sort_list_item*) * (sl->tosort_sz));
		}
	}

	sl->sln = 256;
	sl->sublevels = sort_malloc(slsz);
	memset(sl->sublevels, 0, slsz);

	sl->real_sln = 0;

	tosort_num = sl->tosort_num;
	for (i = 0; i < tosort_num; ++i)
		place_item(sl, i);

	sort_free(sl->tosort);
	sl->tosort = NULL;
	sl->tosort_num = 0;
	sl->tosort_sz = 0;

	if (sl->leaves_num > 1) {
		if (keys_num > 1) {
			if (sort_opts_vals.sflag) {
				mergesort(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			} else {
				DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			}
		} else if (!sort_opts_vals.sflag && sort_opts_vals.complex_sort) {
			DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
			    sizeof(struct sort_list_item *),
			    (int(*)(const void *, const void *)) list_coll_by_str_only);
		}
	}

	sl->leaves_sz = sl->leaves_num;
	sl->leaves = sort_realloc(sl->leaves, (sizeof(struct sort_list_item *) *
	    (sl->leaves_sz)));

	if (!reverse_sort) {
		memcpy(sl->sorted + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));
		sl->start_position += sl->leaves_num;
		sort_left_dec(sl->leaves_num);

		sort_free(sl->leaves);
		sl->leaves = NULL;
		sl->leaves_num = 0;
		sl->leaves_sz = 0;

		sln = sl->sln;

		for (i = 0; i < sln; ++i) {
			slc = sl->sublevels[i];

			if (slc) {
				slc->sorted = sl->sorted;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				if (SMALL_NODE(slc))
					run_sort_level_next(slc);
				else
					push_ls(slc);
				sl->sublevels[i] = NULL;
			}
		}

	} else {
		size_t n;

		sln = sl->sln;

		for (i = 0; i < sln; ++i) {
			n = sln - i - 1;
			slc = sl->sublevels[n];

			if (slc) {
				slc->sorted = sl->sorted;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				if (SMALL_NODE(slc))
					run_sort_level_next(slc);
				else
					push_ls(slc);
				sl->sublevels[n] = NULL;
			}
		}

		memcpy(sl->sorted + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));
		sort_left_dec(sl->leaves_num);
	}

end:
	free_sort_level(sl);
}

/*
 * Single-threaded sort cycle
 */
static void
run_sort_cycle_st(void)
{
	struct sort_level *slc;

	for (;;) {
		slc = pop_ls_st();
		if (slc == NULL) {
			break;
		}
		run_sort_level_next(slc);
	}
}

#if defined(SORT_THREADS)

/*
 * Multi-threaded sort cycle
 */
static void
run_sort_cycle_mt(void)
{
	struct sort_level *slc;

	for (;;) {
		slc = pop_ls_mt();
		if (slc == NULL)
			break;
		run_sort_level_next(slc);
	}
}

/*
 * Sort cycle thread (in multi-threaded mode)
 */
static void*
sort_thread(void* arg)
{
	run_sort_cycle_mt();
	sem_post(&mtsem);

	return (arg);
}

#endif /* defined(SORT_THREADS) */

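/*
 * Bucket the top level and seed the level stack, then drain the stack
 * either directly (single-threaded) or with a pool of detached worker
 * threads that signal completion through mtsem.
 */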
static void
run_top_sort_level(struct sort_level *sl)
{
	struct sort_level *slc;

	reverse_sort = sort_opts_vals.kflag ? keys[0].sm.rflag :
	    default_sort_mods->rflag;

	sl->start_position = 0;
	sl->sln = 256;
	sl->sublevels = sort_malloc(slsz);
	memset(sl->sublevels, 0, slsz);

	for (size_t i = 0; i < sl->tosort_num; ++i)
		place_item(sl, i);

	if (sl->leaves_num > 1) {
		if (keys_num > 1) {
			if (sort_opts_vals.sflag) {
				mergesort(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			} else {
				DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			}
		} else if (!sort_opts_vals.sflag && sort_opts_vals.complex_sort) {
			DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
			    sizeof(struct sort_list_item *),
			    (int(*)(const void *, const void *)) list_coll_by_str_only);
		}
	}

	if (!reverse_sort) {
		memcpy(sl->tosort + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));
		sl->start_position += sl->leaves_num;
		sort_left_dec(sl->leaves_num);

		for (size_t i = 0; i < sl->sln; ++i) {
			slc = sl->sublevels[i];

			if (slc) {
				slc->sorted = sl->tosort;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				push_ls(slc);
				sl->sublevels[i] = NULL;
			}
		}

	} else {
		size_t n;

		for (size_t i = 0; i < sl->sln; ++i) {
			n = sl->sln - i - 1;
			slc = sl->sublevels[n];

			if (slc) {
				slc->sorted = sl->tosort;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				push_ls(slc);
				sl->sublevels[n] = NULL;
			}
		}

		memcpy(sl->tosort + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));

		sort_left_dec(sl->leaves_num);
	}

#if defined(SORT_THREADS)
	if (nthreads < 2) {
#endif
		run_sort_cycle_st();
#if defined(SORT_THREADS)
	} else {
		size_t i;

		for (i = 0; i < nthreads; ++i) {
			pthread_attr_t attr;
			pthread_t pth;

			pthread_attr_init(&attr);
			pthread_attr_setdetachstate(&attr, PTHREAD_DETACHED);

			for (;;) {
				int res = pthread_create(&pth, &attr,
				    sort_thread, NULL);

				/*
				 * pthread_create() returns 0 on success or
				 * an error number; it does not set errno.
				 */
				if (res == 0)
					break;
				if (res == EAGAIN) {
					pthread_yield();
					continue;
				}
				errc(2, res, NULL);
			}

			pthread_attr_destroy(&attr);
		}

		for (i = 0; i < nthreads; ++i)
			sem_wait(&mtsem);
	}
#endif /* defined(SORT_THREADS) */
}

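/*
 * Set up the level stack synchronization objects (in multi-threaded
 * mode), build the top sort level over the input array and run the sort.
 */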
static void
run_sort(struct sort_list_item **base, size_t nmemb)
{
	struct sort_level *sl;

#if defined(SORT_THREADS)
	size_t nthreads_save = nthreads;
	if (nmemb < MT_SORT_THRESHOLD)
		nthreads = 1;

	if (nthreads > 1) {
		pthread_mutexattr_t mattr;

		pthread_mutexattr_init(&mattr);
		pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_ERRORCHECK);

		pthread_mutex_init(&g_ls_mutex, &mattr);
		pthread_cond_init(&g_ls_cond, NULL);

		pthread_mutexattr_destroy(&mattr);

		sem_init(&mtsem, 0, 0);
	}
#endif

	sl = sort_malloc(sizeof(struct sort_level));
	memset(sl, 0, sizeof(struct sort_level));

	sl->tosort = base;
	sl->tosort_num = nmemb;
	sl->tosort_sz = nmemb;

#if defined(SORT_THREADS)
	sort_left = nmemb;
#endif

	run_top_sort_level(sl);

	free_sort_level(sl);

#if defined(SORT_THREADS)
	if (nthreads > 1) {
		sem_destroy(&mtsem);
		pthread_mutex_destroy(&g_ls_mutex);
	}
	nthreads = nthreads_save;
#endif
}

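/* Radix sort entry point. */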
void
rxsort(struct sort_list_item **base, size_t nmemb)
{

	run_sort(base, nmemb);
}