/* lib/generic-radix-tree.c */

#include <linux/export.h>
#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
#include <linux/kmemleak.h>

#define GENRADIX_ARY		(PAGE_SIZE / sizeof(struct genradix_node *))
#define GENRADIX_ARY_SHIFT	ilog2(GENRADIX_ARY)

struct genradix_node {
	union {
		/* Interior node: */
		struct genradix_node	*children[GENRADIX_ARY];

		/* Leaf: */
		u8			data[PAGE_SIZE];
	};
};

static inline int genradix_depth_shift(unsigned depth)
{
	return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth;
}

/*
 * Returns size (of data, in bytes) that a tree of a given depth holds:
 */
static inline size_t genradix_depth_size(unsigned depth)
{
	return 1UL << genradix_depth_shift(depth);
}
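
/*
 * For illustration (assuming 4 KiB pages and 8-byte pointers, i.e. a typical
 * 64-bit configuration): GENRADIX_ARY = 4096 / 8 = 512 and
 * GENRADIX_ARY_SHIFT = 9, so a depth-0 tree addresses one 4 KiB leaf,
 * depth 1 addresses 512 * 4 KiB = 2 MiB, and depth 2 addresses 1 GiB.
 */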

/* depth that's needed for a genradix that can address up to ULONG_MAX: */
#define GENRADIX_MAX_DEPTH	\
	DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT)

#define GENRADIX_DEPTH_MASK				\
	((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
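
/*
 * The root is a tagged pointer: the tree depth is stored in the low bits of
 * the pointer to the top node (nodes are page allocations, so those bits are
 * otherwise unused). The two helpers below unpack it.
 */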

static inline unsigned genradix_root_to_depth(struct genradix_root *r)
{
	return (unsigned long) r & GENRADIX_DEPTH_MASK;
}

static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
{
	return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
}

/*
 * Returns pointer to the specified byte @offset within @radix, or NULL if not
 * allocated
 */
void *__genradix_ptr(struct __genradix *radix, size_t offset)
{
	struct genradix_root *r = READ_ONCE(radix->root);
	struct genradix_node *n = genradix_root_to_node(r);
	unsigned level		= genradix_root_to_depth(r);

	if (ilog2(offset) >= genradix_depth_shift(level))
		return NULL;

	while (1) {
		if (!n)
			return NULL;
		if (!level)
			break;

		level--;

		n = n->children[offset >> genradix_depth_shift(level)];
		offset &= genradix_depth_size(level) - 1;
	}

	return &n->data[offset];
}
EXPORT_SYMBOL(__genradix_ptr);

static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
{
	struct genradix_node *node;

	node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);

	/*
	 * We're using pages (not slab allocations) directly for kernel data
	 * structures, so we need to explicitly inform kmemleak of them in
	 * order to avoid false positive memory leak reports.
	 */
	kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
	return node;
}

static inline void genradix_free_node(struct genradix_node *node)
{
	kmemleak_free(node);
	free_page((unsigned long)node);
}
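
/*
 * Note on concurrency: __genradix_ptr_alloc() below never takes a lock.  Both
 * growing the tree (swapping in a new root) and filling in a missing child
 * pointer are done with cmpxchg_release(); if another writer wins the race,
 * the pre-allocated node is kept for the next missing slot or freed at the
 * end, and the node installed by the winner is used instead.
 */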

/*
 * Returns pointer to the specified byte @offset within @radix, allocating it if
 * necessary - newly allocated slots are always zeroed out:
 */
void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
			   gfp_t gfp_mask)
{
	struct genradix_root *v = READ_ONCE(radix->root);
	struct genradix_node *n, *new_node = NULL;
	unsigned level;

	/* Increase tree depth if necessary: */
	while (1) {
		struct genradix_root *r = v, *new_root;

		n	= genradix_root_to_node(r);
		level	= genradix_root_to_depth(r);

		if (n && ilog2(offset) < genradix_depth_shift(level))
			break;

		if (!new_node) {
			new_node = genradix_alloc_node(gfp_mask);
			if (!new_node)
				return NULL;
		}

		/* The old root (if any) becomes the first child of the new root: */
		new_node->children[0] = n;
		new_root = ((struct genradix_root *)
			    ((unsigned long) new_node | (n ? level + 1 : 0)));

		if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
			v = new_root;
			new_node = NULL;
		}
	}

	while (level--) {
		struct genradix_node **p =
			&n->children[offset >> genradix_depth_shift(level)];
		offset &= genradix_depth_size(level) - 1;

		n = READ_ONCE(*p);
		if (!n) {
			if (!new_node) {
				new_node = genradix_alloc_node(gfp_mask);
				if (!new_node)
					return NULL;
			}

			if (!(n = cmpxchg_release(p, NULL, new_node)))
				swap(n, new_node);
		}
	}

	if (new_node)
		genradix_free_node(new_node);

	return &n->data[offset];
}
EXPORT_SYMBOL(__genradix_ptr_alloc);

void *__genradix_iter_peek(struct genradix_iter *iter,
			   struct __genradix *radix,
			   size_t objs_per_page)
{
	struct genradix_root *r;
	struct genradix_node *n;
	unsigned level, i;
restart:
	r = READ_ONCE(radix->root);
	if (!r)
		return NULL;

	n	= genradix_root_to_node(r);
	level	= genradix_root_to_depth(r);

	if (ilog2(iter->offset) >= genradix_depth_shift(level))
		return NULL;

	while (level) {
		level--;

		i = (iter->offset >> genradix_depth_shift(level)) &
			(GENRADIX_ARY - 1);

		/* Skip over unallocated subtrees, advancing the iterator: */
		while (!n->children[i]) {
			i++;
			iter->offset = round_down(iter->offset +
					   genradix_depth_size(level),
					   genradix_depth_size(level));
			iter->pos = (iter->offset >> PAGE_SHIFT) *
				objs_per_page;
			if (i == GENRADIX_ARY)
				goto restart;
		}

		n = n->children[i];
	}

	return &n->data[iter->offset & (PAGE_SIZE - 1)];
}
EXPORT_SYMBOL(__genradix_iter_peek);
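
/*
 * Callers are not expected to use this directly; the genradix_for_each()
 * iteration macro in <linux/generic-radix-tree.h> (assumed here) wraps it,
 * advancing the iterator between calls - see the usage sketch at the end of
 * this file.
 */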

static void genradix_free_recurse(struct genradix_node *n, unsigned level)
{
	if (level) {
		unsigned i;

		for (i = 0; i < GENRADIX_ARY; i++)
			if (n->children[i])
				genradix_free_recurse(n->children[i], level - 1);
	}

	genradix_free_node(n);
}

int __genradix_prealloc(struct __genradix *radix, size_t size,
			gfp_t gfp_mask)
{
	size_t offset;

	for (offset = 0; offset < size; offset += PAGE_SIZE)
		if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
			return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(__genradix_prealloc);

void __genradix_free(struct __genradix *radix)
{
	struct genradix_root *r = xchg(&radix->root, NULL);

	genradix_free_recurse(genradix_root_to_node(r),
			      genradix_root_to_depth(r));
}
EXPORT_SYMBOL(__genradix_free);
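
/*
 * Example usage - an illustrative sketch only.  It assumes the typed wrapper
 * macros declared in <linux/generic-radix-tree.h> (GENRADIX(), genradix_init(),
 * genradix_ptr_alloc(), genradix_ptr(), genradix_free()), which resolve to the
 * __genradix_* helpers in this file:
 *
 *	struct foo {
 *		int	a;
 *	};
 *
 *	static GENRADIX(struct foo) foos;
 *
 *	static void foo_init(void)
 *	{
 *		genradix_init(&foos);
 *	}
 *
 *	static int foo_store(unsigned idx, int a)
 *	{
 *		struct foo *f = genradix_ptr_alloc(&foos, idx, GFP_KERNEL);
 *
 *		if (!f)
 *			return -ENOMEM;
 *		f->a = a;
 *		return 0;
 *	}
 *
 *	static struct foo *foo_lookup(unsigned idx)
 *	{
 *		return genradix_ptr(&foos, idx);
 *	}
 *
 *	static void foo_exit(void)
 *	{
 *		genradix_free(&foos);
 *	}
 *
 * foo_lookup() returns NULL for indices that were never stored, since
 * __genradix_ptr() does not allocate.
 */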