/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
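
/*
 * Backend private data. Packet-path lookups run locklessly under RCU,
 * validating against @count and falling back to @lock on a detected
 * race; writers (insert, removal, GC) hold @lock and wrap every tree
 * mutation in a write_seqcount_begin()/end() pair.
 */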
struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_t		count;
	struct delayed_work	gc_work;
};

struct nft_rbtree_elem {
	struct rb_node		node;
	struct nft_set_ext	ext;
};

static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
			     const struct nft_rbtree_elem *interval)
{
	return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
}
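
/*
 * Lockless descent, validated against the seqcount snapshot in @seq:
 * returns false as soon as a concurrent writer is detected so the
 * caller can retry. On the way down, the closest element smaller than
 * @key is remembered in @interval as the candidate interval start; if
 * an interval-end node carries the same key as the remembered start,
 * the start is kept. Without an exact match, the candidate matches
 * only if it is active and not an interval end.
 */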
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    nft_rbtree_equal(set, this, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    !nft_rbtree_interval_end(interval))
				continue;
			interval = rbe;
		} else if (d > 0)
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}
			if (nft_rbtree_interval_end(rbe))
				goto out;

			*ext = &rbe->ext;
			return true;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_rbtree_interval_end(interval)) {
		*ext = &interval->ext;
		return true;
	}
out:
	return false;
}
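
/*
 * Fast path: walk the tree locklessly under RCU first. Only if that
 * walk raced with a writer (seqcount changed without finding a match)
 * retry under the read lock, where no mutation can be in flight.
 */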
static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
			      const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}
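
/*
 * Variant of __nft_rbtree_lookup() that returns the element itself
 * rather than its extension, honouring the NFT_SET_ELEM_INTERVAL_END
 * bit in @flags so an exact-match get can ask for either the start or
 * the end element of an interval.
 */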
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask))
				parent = rcu_dereference_raw(parent->rb_left);

			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}
			return false;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_rbtree_interval_end(interval)) {
		*elem = interval;
		return true;
	}

	return false;
}
static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}
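
/*
 * Note the reversed ordering: memcmp() < 0 descends left, so larger
 * keys sit in the left subtree. An interval-end node and an
 * interval-start node may share the same key and are kept as two
 * distinct nodes. -EEXIST is only returned for a same-kind duplicate
 * that is still active in the next generation; inactive duplicates
 * are skipped past.
 */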
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	struct nft_rbtree_elem *rbe;
	struct rb_node *parent, **p;
	int d;

	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = memcmp(nft_set_ext_key(&rbe->ext),
			   nft_set_ext_key(&new->ext),
			   set->klen);
		if (d < 0)
			p = &parent->rb_left;
		else if (d > 0)
			p = &parent->rb_right;
		else {
			if (nft_rbtree_interval_end(rbe) &&
			    !nft_rbtree_interval_end(new)) {
				p = &parent->rb_left;
			} else if (!nft_rbtree_interval_end(rbe) &&
				   nft_rbtree_interval_end(new)) {
				p = &parent->rb_right;
			} else if (nft_set_elem_active(&rbe->ext, genmask)) {
				*ext = &rbe->ext;
				return -EEXIST;
			} else {
				p = &parent->rb_left;
			}
		}
	}
	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}

static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}

static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}

static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
	nft_set_elem_clear_busy(&rbe->ext);
}
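
/*
 * Called on set flush for each element. Marking the element busy
 * arbitrates with the async GC: if this caller set the busy bit first,
 * or the element is no longer active anyway, it is deactivated for the
 * next generation.
 */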
static bool nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set, void *priv)
{
	struct nft_rbtree_elem *rbe = priv;

	if (!nft_set_elem_mark_busy(&rbe->ext) ||
	    !nft_is_active(net, &rbe->ext)) {
		nft_set_elem_change_active(net, set, &rbe->ext);
		return true;
	}
	return false;
}

static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			if (nft_rbtree_interval_end(rbe) &&
			    !nft_rbtree_interval_end(this)) {
				parent = parent->rb_left;
				continue;
			} else if (!nft_rbtree_interval_end(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}

static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}
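
/*
 * Async GC for sets with timeouts. rb_first() starts at the leftmost,
 * i.e. largest, key, so an interval's end node is visited just before
 * its start node and can be batched together with an expired start.
 * The current node cannot be erased before rb_next() has been taken,
 * hence an erased start is parked in @rbe_prev and unlinked on a later
 * iteration (or after the loop).
 */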
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
	struct nft_set_gc_batch *gcb = NULL;
	struct nft_rbtree *priv;
	struct rb_node *node;
	struct nft_set *set;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set  = nft_set_container_of(priv);

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;
		if (nft_set_elem_mark_busy(&rbe->ext))
			continue;

		if (rbe_prev) {
			rb_erase(&rbe_prev->node, &priv->root);
			rbe_prev = NULL;
		}
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			break;

		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe);
		rbe_prev = rbe;

		if (rbe_end) {
			atomic_dec(&set->nelems);
			nft_set_gc_batch_add(gcb, rbe_end);
			rb_erase(&rbe_end->node, &priv->root);
			rbe_end = NULL;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
	if (rbe_prev)
		rb_erase(&rbe_prev->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	nft_set_gc_batch_complete(gcb);

	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_init(&priv->count);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}
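
/*
 * Teardown: the GC worker is stopped first, and rcu_barrier() waits
 * for in-flight RCU callbacks, so the tree can be dismantled and its
 * elements freed synchronously below.
 */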
static void nft_rbtree_destroy(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nft_set_elem_destroy(set, rbe, true);
	}
}

static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

struct nft_set_type nft_set_rbtree_type __read_mostly = {
	.owner		= THIS_MODULE,
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};