// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/list_sort.h>
#include <linux/list.h>

#define MAX_LIST_LENGTH_BITS 20

/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
static struct list_head *merge(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *a, struct list_head *b)
{
	struct list_head head, *tail = &head;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ?: b;
	return head.next;
}
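
/*
 * For example, merging the chains a1->a2->NULL and b1->NULL (with
 * a1 <= b1 <= a2) yields a1->b1->a2->NULL.  The ->prev links of the
 * result are stale at this point; they are rebuilt only by
 * merge_and_restore_back_links() below.
 */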

/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure. This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
static void merge_and_restore_back_links(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *head,
				struct list_head *a, struct list_head *b)
{
	struct list_head *tail = head;
	u8 count = 0;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
			a = a->next;
		} else {
			tail->next = b;
			b->prev = tail;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? : b;

	do {
		/*
		 * In worst cases this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
		if (unlikely(!(++count)))
			(*cmp)(priv, tail->next, tail->next);

		tail->next->prev = tail;
		tail = tail->next;
	} while (tail->next);

	tail->next = head;
	head->prev = tail;
}

/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * This function implements "merge sort", which has O(n log(n))
 * complexity.
 *
 * The comparison function @cmp must return a negative value if @a
 * should sort before @b, and a positive value if @a should sort after
 * @b. If @a and @b are equivalent, and their original relative
 * ordering is to be preserved, @cmp must return 0.
 */
void list_sort(void *priv, struct list_head *head,
		int (*cmp)(void *priv, struct list_head *a,
			struct list_head *b))
{
	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
						-- last slot is a sentinel */
	int lev;  /* index into part[] */
	int max_lev = 0;
	struct list_head *list;

	if (list_empty(head))
		return;

	memset(part, 0, sizeof(part));

	head->prev->next = NULL;
	list = head->next;
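
	/*
	 * Bottom-up merge: part[lev], when non-NULL, holds a sorted
	 * sublist of 2^lev elements (only the top level may hold more,
	 * if the list exceeds 2^MAX_LIST_LENGTH_BITS elements).  Each
	 * element taken off the input is merged into the occupied slots
	 * from level 0 upward, like carry propagation in a binary
	 * counter, so at most MAX_LIST_LENGTH_BITS partial lists exist
	 * at any time.
	 */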
	while (list) {
		struct list_head *cur = list;
		list = list->next;
		cur->next = NULL;

		for (lev = 0; part[lev]; lev++) {
			cur = merge(priv, cmp, part[lev], cur);
			part[lev] = NULL;
		}
		if (lev > max_lev) {
			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
				printk_once(KERN_DEBUG "list too long for efficiency\n");
				lev--;
			}
			max_lev = lev;
		}
		part[lev] = cur;
	}

	for (lev = 0; lev < max_lev; lev++)
		if (part[lev])
			list = merge(priv, cmp, part[lev], list);

	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);
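
/*
 * Illustrative usage sketch (guarded out below, never built): a
 * hypothetical element type and comparison callback showing the @cmp
 * contract documented above.  Names such as my_item, my_cmp and
 * my_sort are examples only, not kernel API.
 */
#if 0
#include <linux/sched.h>	/* cond_resched() */

struct my_item {
	struct list_head node;
	int key;
};

static int my_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct my_item *ia = list_entry(a, struct my_item, node);
	struct my_item *ib = list_entry(b, struct my_item, node);

	/*
	 * list_sort() also calls cmp() periodically with both arguments
	 * equal, purely to give the client a chance to reschedule.
	 */
	cond_resched();

	if (ia->key < ib->key)
		return -1;		/* a sorts before b */
	if (ia->key > ib->key)
		return 1;		/* a sorts after b */
	return 0;			/* equal: original order is kept */
}

static void my_sort(struct list_head *head)
{
	list_sort(NULL, head, my_cmp);	/* no private data needed here */
}
#endif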