mwl8k: don't hardcode the number of transmit queues
[wandboard.git] / net / ipv4 / fib_trie.c
blobfe3c846b99a6dc10fd8b575341ee70b758b3e5d3
1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
26 * Code from fib_hash has been reused which includes the following header:
29 * INET An implementation of the TCP/IP protocol suite for the LINUX
30 * operating system. INET is implemented using the BSD Socket
31 * interface as the means of communication with the user level.
33 * IPv4 FIB: lookup engine and maintenance routines.
36 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
38 * This program is free software; you can redistribute it and/or
39 * modify it under the terms of the GNU General Public License
40 * as published by the Free Software Foundation; either version
41 * 2 of the License, or (at your option) any later version.
43 * Substantial contributions to this work comes from:
45 * David S. Miller, <davem@davemloft.net>
46 * Stephen Hemminger <shemminger@osdl.org>
47 * Paul E. McKenney <paulmck@us.ibm.com>
48 * Patrick McHardy <kaber@trash.net>
51 #define VERSION "0.408"
53 #include <asm/uaccess.h>
54 #include <asm/system.h>
55 #include <linux/bitops.h>
56 #include <linux/types.h>
57 #include <linux/kernel.h>
58 #include <linux/mm.h>
59 #include <linux/string.h>
60 #include <linux/socket.h>
61 #include <linux/sockios.h>
62 #include <linux/errno.h>
63 #include <linux/in.h>
64 #include <linux/inet.h>
65 #include <linux/inetdevice.h>
66 #include <linux/netdevice.h>
67 #include <linux/if_arp.h>
68 #include <linux/proc_fs.h>
69 #include <linux/rcupdate.h>
70 #include <linux/skbuff.h>
71 #include <linux/netlink.h>
72 #include <linux/init.h>
73 #include <linux/list.h>
74 #include <net/net_namespace.h>
75 #include <net/ip.h>
76 #include <net/protocol.h>
77 #include <net/route.h>
78 #include <net/tcp.h>
79 #include <net/sock.h>
80 #include <net/ip_fib.h>
81 #include "fib_lookup.h"
83 #define MAX_STAT_DEPTH 32
85 #define KEYLENGTH (8*sizeof(t_key))
87 typedef unsigned int t_key;
89 #define T_TNODE 0
90 #define T_LEAF 1
91 #define NODE_TYPE_MASK 0x1UL
92 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
94 #define IS_TNODE(n) (!(n->parent & T_LEAF))
95 #define IS_LEAF(n) (n->parent & T_LEAF)
/*
 * Common header shared by leaves and tnodes.  The low bit of 'parent'
 * encodes the node type (T_LEAF/T_TNODE, see NODE_TYPE_MASK); the
 * remaining bits are the parent tnode pointer.
 */
struct node {
	unsigned long parent;
	t_key key;
};
/*
 * Trie leaf: one IPv4 destination.  'list' holds the per-prefix-length
 * leaf_info entries; 'rcu' defers the actual free past RCU readers.
 */
struct leaf {
	unsigned long parent;
	t_key key;
	struct hlist_head list;
	struct rcu_head rcu;
};
/*
 * Per-prefix-length info hung off a leaf.  'falh' is the list of
 * fib_alias entries sharing this prefix length.
 */
struct leaf_info {
	struct hlist_node hlist;
	struct rcu_head rcu;
	int plen;
	struct list_head falh;
};
/*
 * Internal trie node with 2^bits children.  'pos' is the first key bit
 * indexed here; 'bits' how many bits index the child array.  The union
 * overlays the three mutually-exclusive deferred-free mechanisms
 * (call_rcu, vfree workqueue, tnode_free_head batch list).
 */
struct tnode {
	unsigned long parent;
	t_key key;
	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
	unsigned int full_children;	/* KEYLENGTH bits needed */
	unsigned int empty_children;	/* KEYLENGTH bits needed */
	union {
		struct rcu_head rcu;
		struct work_struct work;
		struct tnode *tnode_free;
	};
	struct node *child[0];		/* variable-length child array */
};
#ifdef CONFIG_IP_FIB_TRIE_STATS
/* Runtime lookup statistics, updated on the fast path when enabled. */
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif
/* Snapshot statistics gathered by walking the whole trie (procfs). */
struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int prefixes;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};
/* One routing table: root node plus optional lookup statistics. */
struct trie {
	struct node *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats stats;
#endif
};
159 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
160 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
161 int wasfull);
162 static struct node *resize(struct trie *t, struct tnode *tn);
163 static struct tnode *inflate(struct trie *t, struct tnode *tn);
164 static struct tnode *halve(struct trie *t, struct tnode *tn);
165 /* tnodes to free after resize(); protected by RTNL */
166 static struct tnode *tnode_free_head;
167 static size_t tnode_free_size;
170 * synchronize_rcu after call_rcu for that many pages; it should be especially
171 * useful before resizing the root node with PREEMPT_NONE configs; the value was
172 * obtained experimentally, aiming to avoid visible slowdown.
174 static const int sync_pages = 128;
176 static struct kmem_cache *fn_alias_kmem __read_mostly;
177 static struct kmem_cache *trie_leaf_kmem __read_mostly;
/* Strip the node-type bit from ->parent to recover the parent tnode. */
static inline struct tnode *node_parent(struct node *node)
{
	return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
}
/* RCU-read-side variant of node_parent(). */
static inline struct tnode *node_parent_rcu(struct node *node)
{
	struct tnode *ret = node_parent(node);

	return rcu_dereference(ret);
}
/* Same as rcu_assign_pointer
 * but that macro() assumes that value is a pointer.
 * The smp_wmb() orders prior node initialization before publication;
 * the type bit of the old ->parent word is preserved.
 */
static inline void node_set_parent(struct node *node, struct tnode *ptr)
{
	smp_wmb();
	node->parent = (unsigned long)ptr | NODE_TYPE(node);
}
/* Return child i of tnode tn; BUGs on an out-of-range index. */
static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
{
	BUG_ON(i >= 1U << tn->bits);

	return tn->child[i];
}
/* RCU-read-side variant of tnode_get_child(). */
static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
{
	struct node *ret = tnode_get_child(tn, i);

	return rcu_dereference(ret);
}
/* Number of child slots in tn (2^bits). */
static inline int tnode_child_length(const struct tnode *tn)
{
	return 1 << tn->bits;
}
/* Keep the top l bits of k, zero the rest (l == 0 yields 0, avoiding
 * an undefined full-width shift). */
static inline t_key mask_pfx(t_key k, unsigned short l)
{
	return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
}
/* Extract 'bits' bits of 'a' starting at bit 'offset' (MSB == bit 0).
 * Offsets at or past the key width yield 0. */
static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
{
	if (offset < KEYLENGTH)
		return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
	else
		return 0;
}
/* Full-key equality. */
static inline int tkey_equals(t_key a, t_key b)
{
	return a == b;
}
/* Compare the 'bits' bits of a and b starting at 'offset'; zero-length
 * or out-of-range ranges compare equal by definition. */
static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
{
	if (bits == 0 || offset >= KEYLENGTH)
		return 1;
	bits = bits > KEYLENGTH ? KEYLENGTH : bits;
	return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
}
245 static inline int tkey_mismatch(t_key a, int offset, t_key b)
247 t_key diff = a ^ b;
248 int i = offset;
250 if (!diff)
251 return 0;
252 while ((diff << i) >> (KEYLENGTH-1) == 0)
253 i++;
254 return i;
258 To understand this stuff, an understanding of keys and all their bits is
259 necessary. Every node in the trie has a key associated with it, but not
260 all of the bits in that key are significant.
262 Consider a node 'n' and its parent 'tp'.
264 If n is a leaf, every bit in its key is significant. Its presence is
265 necessitated by path compression, since during a tree traversal (when
266 searching for a leaf - unless we are doing an insertion) we will completely
267 ignore all skipped bits we encounter. Thus we need to verify, at the end of
268 a potentially successful search, that we have indeed been walking the
269 correct key path.
271 Note that we can never "miss" the correct key in the tree if present by
272 following the wrong path. Path compression ensures that segments of the key
273 that are the same for all keys with a given prefix are skipped, but the
274 skipped part *is* identical for each node in the subtrie below the skipped
275 bit! trie_insert() in this implementation takes care of that - note the
276 call to tkey_sub_equals() in trie_insert().
278 if n is an internal node - a 'tnode' here, the various parts of its key
279 have many different meanings.
281 Example:
282 _________________________________________________________________
283 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
284 -----------------------------------------------------------------
285 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
287 _________________________________________________________________
288 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
289 -----------------------------------------------------------------
290 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
292 tp->pos = 7
293 tp->bits = 3
294 n->pos = 15
295 n->bits = 4
297 First, let's just ignore the bits that come before the parent tp, that is
298 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
299 not use them for anything.
301 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
302 index into the parent's child array. That is, they will be used to find
303 'n' among tp's children.
305 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
306 for the node n.
308 All the bits we have seen so far are significant to the node n. The rest
309 of the bits are really not needed or indeed known in n->key.
311 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
312 n's child array, and will of course be different for each child.
315 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
316 at this point.
/* Sanity check: a tnode must never index bits beyond the 32-bit key. */
static inline void check_tnode(const struct tnode *tn)
{
	WARN_ON(tn && tn->pos+tn->bits > 32);
}
325 static const int halve_threshold = 25;
326 static const int inflate_threshold = 50;
327 static const int halve_threshold_root = 15;
328 static const int inflate_threshold_root = 25;
330 static int inflate_threshold_root_fix;
331 #define INFLATE_FIX_MAX 10 /* a comment in resize() */
/* RCU callback: return a fib_alias to its slab cache. */
static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}
/* Free a fib_alias after the current RCU grace period. */
static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}
/* RCU callback: return a leaf to its slab cache. */
static void __leaf_free_rcu(struct rcu_head *head)
{
	struct leaf *l = container_of(head, struct leaf, rcu);
	kmem_cache_free(trie_leaf_kmem, l);
}
/* Free a leaf after an RCU-bh grace period (lookups run in softirq). */
static inline void free_leaf(struct leaf *l)
{
	call_rcu_bh(&l->rcu, __leaf_free_rcu);
}
/* RCU callback: kfree a leaf_info. */
static void __leaf_info_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct leaf_info, rcu));
}
/* Free a leaf_info after the current RCU grace period. */
static inline void free_leaf_info(struct leaf_info *leaf)
{
	call_rcu(&leaf->rcu, __leaf_info_free_rcu);
}
365 static struct tnode *tnode_alloc(size_t size)
367 if (size <= PAGE_SIZE)
368 return kzalloc(size, GFP_KERNEL);
369 else
370 return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
/* Workqueue callback: vfree a large tnode (vfree may not be called
 * from RCU softirq context, hence the deferral). */
static void __tnode_vfree(struct work_struct *arg)
{
	struct tnode *tn = container_of(arg, struct tnode, work);
	vfree(tn);
}
/* RCU callback: free a tnode, choosing kfree or deferred vfree to
 * mirror the allocation strategy in tnode_alloc(). */
static void __tnode_free_rcu(struct rcu_head *head)
{
	struct tnode *tn = container_of(head, struct tnode, rcu);
	size_t size = sizeof(struct tnode) +
		      (sizeof(struct node *) << tn->bits);

	if (size <= PAGE_SIZE)
		kfree(tn);
	else {
		INIT_WORK(&tn->work, __tnode_vfree);
		schedule_work(&tn->work);
	}
}
/* Free a node of either type after an RCU grace period; leaves are
 * routed through free_leaf() (which uses RCU-bh). */
static inline void tnode_free(struct tnode *tn)
{
	if (IS_LEAF(tn))
		free_leaf((struct leaf *) tn);
	else
		call_rcu(&tn->rcu, __tnode_free_rcu);
}
/* Queue a tnode on the RTNL-protected batch list for tnode_free_flush();
 * tracks the accumulated size so the flusher can throttle with
 * synchronize_rcu(). Leaves must not be queued here. */
static void tnode_free_safe(struct tnode *tn)
{
	BUG_ON(IS_LEAF(tn));
	tn->tnode_free = tnode_free_head;
	tnode_free_head = tn;
	tnode_free_size += sizeof(struct tnode) +
			   (sizeof(struct node *) << tn->bits);
}
/* Hand every batched tnode to RCU, then force a grace period if more
 * than sync_pages worth of memory is pending (bounds memory held by
 * deferred frees during large rebalances). */
static void tnode_free_flush(void)
{
	struct tnode *tn;

	while ((tn = tnode_free_head)) {
		tnode_free_head = tn->tnode_free;
		tn->tnode_free = NULL;
		tnode_free(tn);
	}

	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
		tnode_free_size = 0;
		synchronize_rcu();
	}
}
/* Allocate and initialize an empty leaf; returns NULL on allocation
 * failure. The caller sets ->key. */
static struct leaf *leaf_new(void)
{
	struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
	if (l) {
		l->parent = T_LEAF;
		INIT_HLIST_HEAD(&l->list);
	}
	return l;
}
/* Allocate a leaf_info for prefix length 'plen'; returns NULL on
 * allocation failure. */
static struct leaf_info *leaf_info_new(int plen)
{
	struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
	if (li) {
		li->plen = plen;
		INIT_LIST_HEAD(&li->falh);
	}
	return li;
}
446 static struct tnode *tnode_new(t_key key, int pos, int bits)
448 size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
449 struct tnode *tn = tnode_alloc(sz);
451 if (tn) {
452 tn->parent = T_TNODE;
453 tn->pos = pos;
454 tn->bits = bits;
455 tn->key = key;
456 tn->full_children = 0;
457 tn->empty_children = 1<<bits;
460 pr_debug("AT %p s=%u %lu\n", tn, (unsigned int) sizeof(struct tnode),
461 (unsigned long) (sizeof(struct node) << bits));
462 return tn;
/*
 * Check whether a tnode 'n' is "full", i.e. it is an internal node
 * and no bits are skipped. See discussion in dyntree paper p. 6
 */
static inline int tnode_full(const struct tnode *tn, const struct node *n)
{
	if (n == NULL || IS_LEAF(n))
		return 0;

	/* full == the child starts exactly where the parent's index ends */
	return ((struct tnode *) n)->pos == tn->pos + tn->bits;
}
/* Store child n at slot i of tn; -1 asks the reorg helper to compute
 * the old child's fullness itself. */
static inline void put_child(struct trie *t, struct tnode *tn, int i,
			     struct node *n)
{
	tnode_put_child_reorg(tn, i, n, -1);
}
/*
 * Add a child at position i overwriting the old value.
 * Update the value of full_children and empty_children.
 * 'wasfull' is the cached fullness of the old child, or -1 to have it
 * recomputed here.  The new child is published last via
 * rcu_assign_pointer so concurrent lookups see a consistent node.
 */
static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
				  int wasfull)
{
	struct node *chi = tn->child[i];
	int isfull;

	BUG_ON(i >= 1<<tn->bits);

	/* update emptyChildren */
	if (n == NULL && chi != NULL)
		tn->empty_children++;
	else if (n != NULL && chi == NULL)
		tn->empty_children--;

	/* update fullChildren */
	if (wasfull == -1)
		wasfull = tnode_full(tn, chi);

	isfull = tnode_full(tn, n);
	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	if (n)
		node_set_parent(n, tn);

	rcu_assign_pointer(tn->child[i], n);
}
519 static struct node *resize(struct trie *t, struct tnode *tn)
521 int i;
522 int err = 0;
523 struct tnode *old_tn;
524 int inflate_threshold_use;
525 int halve_threshold_use;
526 int max_resize;
528 if (!tn)
529 return NULL;
531 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
532 tn, inflate_threshold, halve_threshold);
534 /* No children */
535 if (tn->empty_children == tnode_child_length(tn)) {
536 tnode_free_safe(tn);
537 return NULL;
539 /* One child */
540 if (tn->empty_children == tnode_child_length(tn) - 1)
541 for (i = 0; i < tnode_child_length(tn); i++) {
542 struct node *n;
544 n = tn->child[i];
545 if (!n)
546 continue;
548 /* compress one level */
549 node_set_parent(n, NULL);
550 tnode_free_safe(tn);
551 return n;
554 * Double as long as the resulting node has a number of
555 * nonempty nodes that are above the threshold.
559 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
560 * the Helsinki University of Technology and Matti Tikkanen of Nokia
561 * Telecommunications, page 6:
562 * "A node is doubled if the ratio of non-empty children to all
563 * children in the *doubled* node is at least 'high'."
565 * 'high' in this instance is the variable 'inflate_threshold'. It
566 * is expressed as a percentage, so we multiply it with
567 * tnode_child_length() and instead of multiplying by 2 (since the
568 * child array will be doubled by inflate()) and multiplying
569 * the left-hand side by 100 (to handle the percentage thing) we
570 * multiply the left-hand side by 50.
572 * The left-hand side may look a bit weird: tnode_child_length(tn)
573 * - tn->empty_children is of course the number of non-null children
574 * in the current node. tn->full_children is the number of "full"
575 * children, that is non-null tnodes with a skip value of 0.
576 * All of those will be doubled in the resulting inflated tnode, so
577 * we just count them one extra time here.
579 * A clearer way to write this would be:
581 * to_be_doubled = tn->full_children;
582 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
583 * tn->full_children;
585 * new_child_length = tnode_child_length(tn) * 2;
587 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
588 * new_child_length;
589 * if (new_fill_factor >= inflate_threshold)
591 * ...and so on, tho it would mess up the while () loop.
593 * anyway,
594 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
595 * inflate_threshold
597 * avoid a division:
598 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
599 * inflate_threshold * new_child_length
601 * expand not_to_be_doubled and to_be_doubled, and shorten:
602 * 100 * (tnode_child_length(tn) - tn->empty_children +
603 * tn->full_children) >= inflate_threshold * new_child_length
605 * expand new_child_length:
606 * 100 * (tnode_child_length(tn) - tn->empty_children +
607 * tn->full_children) >=
608 * inflate_threshold * tnode_child_length(tn) * 2
610 * shorten again:
611 * 50 * (tn->full_children + tnode_child_length(tn) -
612 * tn->empty_children) >= inflate_threshold *
613 * tnode_child_length(tn)
617 check_tnode(tn);
619 /* Keep root node larger */
621 if (!tn->parent)
622 inflate_threshold_use = inflate_threshold_root +
623 inflate_threshold_root_fix;
624 else
625 inflate_threshold_use = inflate_threshold;
627 err = 0;
628 max_resize = 10;
629 while ((tn->full_children > 0 && max_resize-- &&
630 50 * (tn->full_children + tnode_child_length(tn)
631 - tn->empty_children)
632 >= inflate_threshold_use * tnode_child_length(tn))) {
634 old_tn = tn;
635 tn = inflate(t, tn);
637 if (IS_ERR(tn)) {
638 tn = old_tn;
639 #ifdef CONFIG_IP_FIB_TRIE_STATS
640 t->stats.resize_node_skipped++;
641 #endif
642 break;
646 if (max_resize < 0) {
647 if (!tn->parent) {
649 * It was observed that during large updates even
650 * inflate_threshold_root = 35 might be needed to avoid
651 * this warning; but it should be temporary, so let's
652 * try to handle this automatically.
654 if (inflate_threshold_root_fix < INFLATE_FIX_MAX)
655 inflate_threshold_root_fix++;
656 else
657 pr_warning("Fix inflate_threshold_root."
658 " Now=%d size=%d bits fix=%d\n",
659 inflate_threshold_root, tn->bits,
660 inflate_threshold_root_fix);
661 } else {
662 pr_warning("Fix inflate_threshold."
663 " Now=%d size=%d bits\n",
664 inflate_threshold, tn->bits);
666 } else if (max_resize > 3 && !tn->parent && inflate_threshold_root_fix)
667 inflate_threshold_root_fix--;
669 check_tnode(tn);
672 * Halve as long as the number of empty children in this
673 * node is above threshold.
677 /* Keep root node larger */
679 if (!tn->parent)
680 halve_threshold_use = halve_threshold_root;
681 else
682 halve_threshold_use = halve_threshold;
684 err = 0;
685 max_resize = 10;
686 while (tn->bits > 1 && max_resize-- &&
687 100 * (tnode_child_length(tn) - tn->empty_children) <
688 halve_threshold_use * tnode_child_length(tn)) {
690 old_tn = tn;
691 tn = halve(t, tn);
692 if (IS_ERR(tn)) {
693 tn = old_tn;
694 #ifdef CONFIG_IP_FIB_TRIE_STATS
695 t->stats.resize_node_skipped++;
696 #endif
697 break;
701 if (max_resize < 0) {
702 if (!tn->parent)
703 pr_warning("Fix halve_threshold_root."
704 " Now=%d size=%d bits\n",
705 halve_threshold_root, tn->bits);
706 else
707 pr_warning("Fix halve_threshold."
708 " Now=%d size=%d bits\n",
709 halve_threshold, tn->bits);
712 /* Only one child remains */
713 if (tn->empty_children == tnode_child_length(tn) - 1)
714 for (i = 0; i < tnode_child_length(tn); i++) {
715 struct node *n;
717 n = tn->child[i];
718 if (!n)
719 continue;
721 /* compress one level */
723 node_set_parent(n, NULL);
724 tnode_free_safe(tn);
725 return n;
728 return (struct node *) tn;
/*
 * Replace tn with a node indexing one more bit (double the children).
 * On allocation failure everything allocated so far is freed and
 * ERR_PTR(-ENOMEM) is returned, leaving the original node untouched.
 */
static struct tnode *inflate(struct trie *t, struct tnode *tn)
{
	struct tnode *oldtnode = tn;
	int olen = tnode_child_length(tn);
	int i;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and inflate
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i++) {
		struct tnode *inode;

		inode = (struct tnode *) tnode_get_child(oldtnode, i);
		if (inode &&
		    IS_TNODE(inode) &&
		    inode->pos == oldtnode->pos + oldtnode->bits &&
		    inode->bits > 1) {
			struct tnode *left, *right;
			/* single bit at position inode->pos */
			t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;

			left = tnode_new(inode->key&(~m), inode->pos + 1,
					 inode->bits - 1);
			if (!left)
				goto nomem;

			right = tnode_new(inode->key|m, inode->pos + 1,
					  inode->bits - 1);

			if (!right) {
				tnode_free(left);
				goto nomem;
			}

			put_child(t, tn, 2*i, (struct node *) left);
			put_child(t, tn, 2*i+1, (struct node *) right);
		}
	}

	for (i = 0; i < olen; i++) {
		struct tnode *inode;
		struct node *node = tnode_get_child(oldtnode, i);
		struct tnode *left, *right;
		int size, j;

		/* An empty child */
		if (node == NULL)
			continue;

		/* A leaf or an internal node with skipped bits */

		if (IS_LEAF(node) || ((struct tnode *) node)->pos >
		   tn->pos + tn->bits - 1) {
			/* the new extra index bit decides which slot */
			if (tkey_extract_bits(node->key,
					      oldtnode->pos + oldtnode->bits,
					      1) == 0)
				put_child(t, tn, 2*i, node);
			else
				put_child(t, tn, 2*i+1, node);
			continue;
		}

		/* An internal node with two children */
		inode = (struct tnode *) node;

		if (inode->bits == 1) {
			put_child(t, tn, 2*i, inode->child[0]);
			put_child(t, tn, 2*i+1, inode->child[1]);

			tnode_free_safe(inode);
			continue;
		}

		/* An internal node with more than two children.
		 *
		 * We will replace this node 'inode' with two new ones,
		 * 'left' and 'right' (preallocated above), each with
		 * half of the original children.  The two new nodes
		 * index one bit further down the key, so the bit at
		 * position (inode->pos) — which was synthesized into
		 * their keys when they were allocated — is "0" in
		 * left's key and "1" in right's key.
		 */

		left = (struct tnode *) tnode_get_child(tn, 2*i);
		put_child(t, tn, 2*i, NULL);

		BUG_ON(!left);

		right = (struct tnode *) tnode_get_child(tn, 2*i+1);
		put_child(t, tn, 2*i+1, NULL);

		BUG_ON(!right);

		size = tnode_child_length(left);
		for (j = 0; j < size; j++) {
			put_child(t, left, j, inode->child[j]);
			put_child(t, right, j, inode->child[j + size]);
		}
		put_child(t, tn, 2*i, resize(t, left));
		put_child(t, tn, 2*i+1, resize(t, right));

		tnode_free_safe(inode);
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	{
		int size = tnode_child_length(tn);
		int j;

		for (j = 0; j < size; j++)
			if (tn->child[j])
				tnode_free((struct tnode *)tn->child[j]);

		tnode_free(tn);

		return ERR_PTR(-ENOMEM);
	}
}
/*
 * Replace tn with a node indexing one less bit (half the children);
 * each pair of old slots collapses into one, allocating a 1-bit binary
 * tnode where both slots of a pair are occupied.  On allocation
 * failure everything allocated so far is freed and ERR_PTR(-ENOMEM)
 * is returned, leaving the original node untouched.
 */
static struct tnode *halve(struct trie *t, struct tnode *tn)
{
	struct tnode *oldtnode = tn;
	struct node *left, *right;
	int i;
	int olen = tnode_child_length(tn);

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and halve
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i += 2) {
		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* Two nonempty children */
		if (left && right) {
			struct tnode *newn;

			newn = tnode_new(left->key, tn->pos + tn->bits, 1);

			if (!newn)
				goto nomem;

			put_child(t, tn, i/2, (struct node *)newn);
		}

	}

	for (i = 0; i < olen; i += 2) {
		struct tnode *newBinNode;

		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* At least one of the children is empty */
		if (left == NULL) {
			if (right == NULL)    /* Both are empty */
				continue;
			put_child(t, tn, i/2, right);
			continue;
		}

		if (right == NULL) {
			put_child(t, tn, i/2, left);
			continue;
		}

		/* Two nonempty children */
		newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
		put_child(t, tn, i/2, NULL);
		put_child(t, newBinNode, 0, left);
		put_child(t, newBinNode, 1, right);
		put_child(t, tn, i/2, resize(t, newBinNode));
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	{
		int size = tnode_child_length(tn);
		int j;

		for (j = 0; j < size; j++)
			if (tn->child[j])
				tnode_free((struct tnode *)tn->child[j]);

		tnode_free(tn);

		return ERR_PTR(-ENOMEM);
	}
}
/* readside must use rcu_read_lock currently dump routines
 via get_fa_head and dump */

/* Find the leaf_info for prefix length 'plen' on leaf l, or NULL. */
static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
{
	struct hlist_head *head = &l->list;
	struct hlist_node *node;
	struct leaf_info *li;

	hlist_for_each_entry_rcu(li, node, head, hlist)
		if (li->plen == plen)
			return li;

	return NULL;
}
/* Return the fib_alias list head for prefix length 'plen' on leaf l,
 * or NULL if that prefix length is not present. */
static inline struct list_head *get_fa_head(struct leaf *l, int plen)
{
	struct leaf_info *li = find_leaf_info(l, plen);

	if (!li)
		return NULL;

	return &li->falh;
}
/* Insert 'new' into the leaf's list, kept sorted by descending plen so
 * the longest prefix is found first. */
static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
{
	struct leaf_info *li = NULL, *last = NULL;
	struct hlist_node *node;

	if (hlist_empty(head)) {
		hlist_add_head_rcu(&new->hlist, head);
	} else {
		/* find the first entry with a smaller plen */
		hlist_for_each_entry(li, node, head, hlist) {
			if (new->plen > li->plen)
				break;

			last = li;
		}
		if (last)
			hlist_add_after_rcu(&last->hlist, &new->hlist);
		else
			hlist_add_before_rcu(&new->hlist, &li->hlist);
	}
}
/* rcu_read_lock needs to be hold by caller from readside */

/*
 * Exact-match lookup: walk the trie for 'key', following index bits
 * while each tnode's non-skipped prefix bits match, then verify the
 * full key against the leaf reached (skipped bits were never checked
 * on the way down).  Returns the leaf or NULL.
 */
static struct leaf *
fib_find_node(struct trie *t, u32 key)
{
	int pos;
	struct tnode *tn;
	struct node *n;

	pos = 0;
	n = rcu_dereference(t->trie);

	while (n != NULL && NODE_TYPE(n) == T_TNODE) {
		tn = (struct tnode *) n;

		check_tnode(tn);

		if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
			pos = tn->pos + tn->bits;
			n = tnode_get_child_rcu(tn,
						tkey_extract_bits(key,
								  tn->pos,
								  tn->bits));
		} else
			break;
	}
	/* Case we have found a leaf. Compare prefixes */

	if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
		return (struct leaf *)n;

	return NULL;
}
/*
 * Walk from tn up to the root, resizing each tnode and re-linking the
 * (possibly replaced) child into its parent; finally resize the root
 * itself and publish it.  Batched tnode frees are flushed at each
 * level.  Caller holds RTNL.
 */
static void trie_rebalance(struct trie *t, struct tnode *tn)
{
	int wasfull;
	t_key cindex, key;
	struct tnode *tp;

	key = tn->key;

	while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
		tn = (struct tnode *) resize(t, (struct tnode *)tn);

		tnode_put_child_reorg((struct tnode *)tp, cindex,
				      (struct node *)tn, wasfull);

		/* resize() may have re-parented or replaced tn */
		tp = node_parent((struct node *) tn);
		if (!tp)
			rcu_assign_pointer(t->trie, (struct node *)tn);

		tnode_free_flush();
		if (!tp)
			break;
		tn = tp;
	}

	/* Handle last (top) tnode */
	if (IS_TNODE(tn))
		tn = (struct tnode *)resize(t, (struct tnode *)tn);

	rcu_assign_pointer(t->trie, (struct node *)tn);
	tnode_free_flush();

	return;
}
/* only used from updater-side */

/*
 * Insert (or find) the leaf for 'key' and attach a leaf_info for
 * prefix length 'plen'.  Returns the fib_alias list head for that
 * (key, plen), or NULL on allocation failure.  Rebalances the trie
 * after any structural change.
 */
static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
{
	int pos, newpos;
	struct tnode *tp = NULL, *tn = NULL;
	struct node *n;
	struct leaf *l;
	int missbit;
	struct list_head *fa_head = NULL;
	struct leaf_info *li;
	t_key cindex;

	pos = 0;
	n = t->trie;

	/* If we point to NULL, stop. Either the tree is empty and we should
	 * just put a new leaf in if, or we have reached an empty child slot,
	 * and we should just put our new leaf in that.
	 * If we point to a T_TNODE, check if it matches our key. Note that
	 * a T_TNODE might be skipping any number of bits - its 'pos' need
	 * not be the parent's 'pos'+'bits'!
	 *
	 * If it does match the current key, get pos/bits from it, extract
	 * the index from our key, push the T_TNODE and walk the tree.
	 *
	 * If it doesn't, we have to replace it with a new T_TNODE.
	 *
	 * If we point to a T_LEAF, it might or might not have the same key
	 * as we do. If it does, just change the value, update the T_LEAF's
	 * value, and return it.
	 * If it doesn't, we need to replace it with a T_TNODE.
	 */

	while (n != NULL && NODE_TYPE(n) == T_TNODE) {
		tn = (struct tnode *) n;

		check_tnode(tn);

		if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
			tp = tn;
			pos = tn->pos + tn->bits;
			n = tnode_get_child(tn,
					    tkey_extract_bits(key,
							      tn->pos,
							      tn->bits));

			BUG_ON(n && node_parent(n) != tn);
		} else
			break;
	}

	/*
	 * n  ----> NULL, LEAF or TNODE
	 *
	 * tp is n's (parent) ----> NULL or TNODE
	 */

	BUG_ON(tp && IS_LEAF(tp));

	/* Case 1: n is a leaf. Compare prefixes */

	if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
		l = (struct leaf *) n;
		li = leaf_info_new(plen);

		if (!li)
			return NULL;

		fa_head = &li->falh;
		insert_leaf_info(&l->list, li);
		goto done;
	}
	l = leaf_new();

	if (!l)
		return NULL;

	l->key = key;
	li = leaf_info_new(plen);

	if (!li) {
		free_leaf(l);
		return NULL;
	}

	fa_head = &li->falh;
	insert_leaf_info(&l->list, li);

	if (t->trie && n == NULL) {
		/* Case 2: n is NULL, and will just insert a new leaf */

		node_set_parent((struct node *)l, tp);

		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
		put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
	} else {
		/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
		/*
		 *  Add a new tnode here
		 *  first tnode need some special handling
		 */

		if (tp)
			pos = tp->pos+tp->bits;
		else
			pos = 0;

		if (n) {
			newpos = tkey_mismatch(key, pos, n->key);
			tn = tnode_new(n->key, newpos, 1);
		} else {
			newpos = 0;
			tn = tnode_new(key, newpos, 1); /* First tnode */
		}

		if (!tn) {
			free_leaf_info(li);
			free_leaf(l);
			return NULL;
		}

		node_set_parent((struct node *)tn, tp);

		/* the mismatching bit picks the slot for the new leaf */
		missbit = tkey_extract_bits(key, newpos, 1);
		put_child(t, tn, missbit, (struct node *)l);
		put_child(t, tn, 1-missbit, n);

		if (tp) {
			cindex = tkey_extract_bits(key, tp->pos, tp->bits);
			put_child(t, (struct tnode *)tp, cindex,
				  (struct node *)tn);
		} else {
			rcu_assign_pointer(t->trie, (struct node *)tn);
			tp = tn;
		}
	}

	if (tp && tp->pos + tp->bits > 32)
		pr_warning("fib_trie"
			   " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
			   tp, tp->pos, tp->bits, key, plen);

	/* Rebalance the trie */

	trie_rebalance(t, tp);
done:
	return fa_head;
}
1224 * Caller must hold RTNL.
/*
 * fn_trie_insert - insert or replace a route in the trie.
 *
 * Caller must hold RTNL.  Validates the prefix, resolves the fib_info,
 * then either replaces/extends an existing alias list or inserts a new
 * leaf via fib_insert_node().  Returns 0 on success or a negative errno.
 */
static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
{
	struct trie *t = (struct trie *) tb->tb_data;
	struct fib_alias *fa, *new_fa;
	struct list_head *fa_head = NULL;
	struct fib_info *fi;
	int plen = cfg->fc_dst_len;
	u8 tos = cfg->fc_tos;
	u32 key, mask;
	int err;
	struct leaf *l;

	if (plen > 32)
		return -EINVAL;

	key = ntohl(cfg->fc_dst);

	pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);

	mask = ntohl(inet_make_mask(plen));

	/* Host bits set beyond the prefix length are a malformed request. */
	if (key & ~mask)
		return -EINVAL;

	key = key & mask;

	fi = fib_create_info(cfg);
	if (IS_ERR(fi)) {
		err = PTR_ERR(fi);
		goto err;
	}

	l = fib_find_node(t, key);
	fa = NULL;

	if (l) {
		fa_head = get_fa_head(l, plen);
		fa = fib_find_alias(fa_head, tos, fi->fib_priority);
	}

	/* Now fa, if non-NULL, points to the first fib alias
	 * with the same keys [prefix,tos,priority], if such key already
	 * exists or to the node before which we will insert new one.
	 *
	 * If fa is NULL, we will need to allocate a new one and
	 * insert to the head of f.
	 *
	 * If f is NULL, no fib node matched the destination key
	 * and we need to allocate a new one of those as well.
	 */

	if (fa && fa->fa_tos == tos &&
	    fa->fa_info->fib_priority == fi->fib_priority) {
		struct fib_alias *fa_first, *fa_match;

		err = -EEXIST;
		if (cfg->fc_nlflags & NLM_F_EXCL)
			goto out;

		/* We have 2 goals:
		 * 1. Find exact match for type, scope, fib_info to avoid
		 * duplicate routes
		 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
		 */
		fa_match = NULL;
		fa_first = fa;
		fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
		list_for_each_entry_continue(fa, fa_head, fa_list) {
			if (fa->fa_tos != tos)
				break;
			if (fa->fa_info->fib_priority != fi->fib_priority)
				break;
			if (fa->fa_type == cfg->fc_type &&
			    fa->fa_scope == cfg->fc_scope &&
			    fa->fa_info == fi) {
				fa_match = fa;
				break;
			}
		}

		if (cfg->fc_nlflags & NLM_F_REPLACE) {
			struct fib_info *fi_drop;
			u8 state;

			fa = fa_first;
			if (fa_match) {
				/* Replacing with an identical alias is a no-op. */
				if (fa == fa_match)
					err = 0;
				goto out;
			}
			err = -ENOBUFS;
			new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
			if (new_fa == NULL)
				goto out;

			fi_drop = fa->fa_info;
			new_fa->fa_tos = fa->fa_tos;
			new_fa->fa_info = fi;
			new_fa->fa_type = cfg->fc_type;
			new_fa->fa_scope = cfg->fc_scope;
			state = fa->fa_state;
			new_fa->fa_state = state & ~FA_S_ACCESSED;

			/* Swap the alias in under RCU; readers see old or new. */
			list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
			alias_free_mem_rcu(fa);

			fib_release_info(fi_drop);
			if (state & FA_S_ACCESSED)
				rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
			rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
				tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);

			goto succeeded;
		}
		/* Error if we find a perfect match which
		 * uses the same scope, type, and nexthop
		 * information.
		 */
		if (fa_match)
			goto out;

		if (!(cfg->fc_nlflags & NLM_F_APPEND))
			fa = fa_first;
	}
	err = -ENOENT;
	if (!(cfg->fc_nlflags & NLM_F_CREATE))
		goto out;

	err = -ENOBUFS;
	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
	if (new_fa == NULL)
		goto out;

	new_fa->fa_info = fi;
	new_fa->fa_tos = tos;
	new_fa->fa_type = cfg->fc_type;
	new_fa->fa_scope = cfg->fc_scope;
	new_fa->fa_state = 0;
	/*
	 * Insert new entry to the list.
	 */

	if (!fa_head) {
		/* No leaf for this key yet: create one (may rebalance trie). */
		fa_head = fib_insert_node(t, key, plen);
		if (unlikely(!fa_head)) {
			err = -ENOMEM;
			goto out_free_new_fa;
		}
	}

	list_add_tail_rcu(&new_fa->fa_list,
			  (fa ? &fa->fa_list : fa_head));

	rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
	rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
		  &cfg->fc_nlinfo, 0);
succeeded:
	return 0;

out_free_new_fa:
	kmem_cache_free(fn_alias_kmem, new_fa);
out:
	fib_release_info(fi);
err:
	return err;
}
/* should be called with rcu_read_lock */
/*
 * check_leaf - try to match @key against every prefix stored in leaf @l.
 *
 * Each leaf_info on the leaf represents one prefix length; the leaf
 * matches if the masked key equals the leaf key.  Returns <= 0 when
 * fib_semantic_match() accepted an alias (result filled into @res),
 * or 1 when nothing in this leaf matched.
 */
static int check_leaf(struct trie *t, struct leaf *l,
		      t_key key, const struct flowi *flp,
		      struct fib_result *res)
{
	struct leaf_info *li;
	struct hlist_head *hhead = &l->list;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(li, node, hhead, hlist) {
		int err;
		int plen = li->plen;
		__be32 mask = inet_make_mask(plen);

		/* Prefix of this length does not cover the search key. */
		if (l->key != (key & ntohl(mask)))
			continue;

		err = fib_semantic_match(&li->falh, flp, res, plen);

#ifdef CONFIG_IP_FIB_TRIE_STATS
		if (err <= 0)
			t->stats.semantic_match_passed++;
		else
			t->stats.semantic_match_miss++;
#endif
		if (err <= 0)
			return err;
	}

	return 1;
}
/*
 * fn_trie_lookup - longest-prefix-match lookup of flp->fl4_dst in the trie.
 *
 * Walks the LC-trie under rcu_read_lock, descending tnodes by extracting
 * index bits from the key and backtracking (chopping index bits) when a
 * branch cannot contain a matching prefix.  Returns the result of
 * check_leaf() (<= 0 on a hit, result in @res) or 1 when no route matched.
 */
static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,
			  struct fib_result *res)
{
	struct trie *t = (struct trie *) tb->tb_data;
	int ret;
	struct node *n;
	struct tnode *pn;
	int pos, bits;
	t_key key = ntohl(flp->fl4_dst);
	int chopped_off;
	t_key cindex = 0;
	int current_prefix_length = KEYLENGTH;
	struct tnode *cn;
	t_key node_prefix, key_prefix, pref_mismatch;
	int mp;

	rcu_read_lock();

	n = rcu_dereference(t->trie);
	if (!n)
		goto failed;

#ifdef CONFIG_IP_FIB_TRIE_STATS
	t->stats.gets++;
#endif

	/* Just a leaf? */
	if (IS_LEAF(n)) {
		ret = check_leaf(t, (struct leaf *)n, key, flp, res);
		goto found;
	}

	pn = (struct tnode *) n;
	chopped_off = 0;

	while (pn) {
		pos = pn->pos;
		bits = pn->bits;

		/* After a backtrack, cindex was adjusted in place; only
		 * recompute it from the key when descending normally. */
		if (!chopped_off)
			cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
						   pos, bits);

		n = tnode_get_child_rcu(pn, cindex);

		if (n == NULL) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.null_node_hit++;
#endif
			goto backtrace;
		}

		if (IS_LEAF(n)) {
			ret = check_leaf(t, (struct leaf *)n, key, flp, res);
			if (ret > 0)
				goto backtrace;
			goto found;
		}

		cn = (struct tnode *)n;

		/*
		 * It's a tnode, and we can do some extra checks here if we
		 * like, to avoid descending into a dead-end branch.
		 * This tnode is in the parent's child array at index
		 * key[p_pos..p_pos+p_bits] but potentially with some bits
		 * chopped off, so in reality the index may be just a
		 * subprefix, padded with zero at the end.
		 * We can also take a look at any skipped bits in this
		 * tnode - everything up to p_pos is supposed to be ok,
		 * and the non-chopped bits of the index (se previous
		 * paragraph) are also guaranteed ok, but the rest is
		 * considered unknown.
		 *
		 * The skipped bits are key[pos+bits..cn->pos].
		 */

		/* If current_prefix_length < pos+bits, we are already doing
		 * actual prefix matching, which means everything from
		 * pos+(bits-chopped_off) onward must be zero along some
		 * branch of this subtree - otherwise there is *no* valid
		 * prefix present. Here we can only check the skipped
		 * bits. Remember, since we have already indexed into the
		 * parent's child array, we know that the bits we chopped of
		 * *are* zero.
		 */

		/* NOTA BENE: Checking only skipped bits
		   for the new node here */

		if (current_prefix_length < pos+bits) {
			if (tkey_extract_bits(cn->key, current_prefix_length,
						cn->pos - current_prefix_length)
			    || !(cn->child[0]))
				goto backtrace;
		}

		/*
		 * If chopped_off=0, the index is fully validated and we
		 * only need to look at the skipped bits for this, the new,
		 * tnode. What we actually want to do is to find out if
		 * these skipped bits match our key perfectly, or if we will
		 * have to count on finding a matching prefix further down,
		 * because if we do, we would like to have some way of
		 * verifying the existence of such a prefix at this point.
		 */

		/* The only thing we can do at this point is to verify that
		 * any such matching prefix can indeed be a prefix to our
		 * key, and if the bits in the node we are inspecting that
		 * do not match our key are not ZERO, this cannot be true.
		 * Thus, find out where there is a mismatch (before cn->pos)
		 * and verify that all the mismatching bits are zero in the
		 * new tnode's key.
		 */

		/*
		 * Note: We aren't very concerned about the piece of
		 * the key that precede pn->pos+pn->bits, since these
		 * have already been checked. The bits after cn->pos
		 * aren't checked since these are by definition
		 * "unknown" at this point. Thus, what we want to see
		 * is if we are about to enter the "prefix matching"
		 * state, and in that case verify that the skipped
		 * bits that will prevail throughout this subtree are
		 * zero, as they have to be if we are to find a
		 * matching prefix.
		 */

		node_prefix = mask_pfx(cn->key, cn->pos);
		key_prefix = mask_pfx(key, cn->pos);
		pref_mismatch = key_prefix^node_prefix;
		mp = 0;

		/*
		 * In short: If skipped bits in this node do not match
		 * the search key, enter the "prefix matching"
		 * state.directly.
		 */
		if (pref_mismatch) {
			/* Locate the first (most significant) mismatching bit. */
			while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
				mp++;
				pref_mismatch = pref_mismatch << 1;
			}
			key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);

			if (key_prefix != 0)
				goto backtrace;

			if (current_prefix_length >= cn->pos)
				current_prefix_length = mp;
		}

		pn = (struct tnode *)n; /* Descend */
		chopped_off = 0;
		continue;

backtrace:
		chopped_off++;

		/* As zero don't change the child key (cindex) */
		while ((chopped_off <= pn->bits)
		       && !(cindex & (1<<(chopped_off-1))))
			chopped_off++;

		/* Decrease current_... with bits chopped off */
		if (current_prefix_length > pn->pos + pn->bits - chopped_off)
			current_prefix_length = pn->pos + pn->bits
				- chopped_off;

		/*
		 * Either we do the actual chop off according or if we have
		 * chopped off all bits in this tnode walk up to our parent.
		 */

		if (chopped_off <= pn->bits) {
			cindex &= ~(1 << (chopped_off-1));
		} else {
			struct tnode *parent = node_parent_rcu((struct node *) pn);
			if (!parent)
				goto failed;

			/* Get Child's index */
			cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
			pn = parent;
			chopped_off = 0;

#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.backtrack++;
#endif
			goto backtrace;
		}
	}
failed:
	ret = 1;
found:
	rcu_read_unlock();
	return ret;
}
/*
 * Remove the leaf and return parent.
 */
/* Unlinks @l from its parent tnode (or clears the trie root if @l is the
 * only node), rebalances the affected subtree and frees the leaf via RCU. */
static void trie_leaf_remove(struct trie *t, struct leaf *l)
{
	struct tnode *tp = node_parent((struct node *) l);

	pr_debug("entering trie_leaf_remove(%p)\n", l);

	if (tp) {
		t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
		put_child(t, (struct tnode *)tp, cindex, NULL);
		/* Parent may now be compressible; rebalance upwards. */
		trie_rebalance(t, tp);
	} else
		rcu_assign_pointer(t->trie, NULL);

	free_leaf(l);
}
/*
 * Caller must hold RTNL.
 */
/*
 * fn_trie_delete - remove the route described by @cfg from the trie.
 *
 * Finds the leaf and the matching alias (by tos/type/scope/protocol and
 * nexthop), unlinks it under RCU, and tears down the leaf_info/leaf when
 * they become empty.  Returns 0 on success, -EINVAL on a bad prefix,
 * -ESRCH when no matching route exists.
 */
static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
{
	struct trie *t = (struct trie *) tb->tb_data;
	u32 key, mask;
	int plen = cfg->fc_dst_len;
	u8 tos = cfg->fc_tos;
	struct fib_alias *fa, *fa_to_delete;
	struct list_head *fa_head;
	struct leaf *l;
	struct leaf_info *li;

	if (plen > 32)
		return -EINVAL;

	key = ntohl(cfg->fc_dst);
	mask = ntohl(inet_make_mask(plen));

	if (key & ~mask)
		return -EINVAL;

	key = key & mask;
	l = fib_find_node(t, key);

	if (!l)
		return -ESRCH;

	fa_head = get_fa_head(l, plen);
	fa = fib_find_alias(fa_head, tos, 0);

	if (!fa)
		return -ESRCH;

	pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);

	fa_to_delete = NULL;
	/* Restart iteration from the alias found above (inclusive). */
	fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
	list_for_each_entry_continue(fa, fa_head, fa_list) {
		struct fib_info *fi = fa->fa_info;

		if (fa->fa_tos != tos)
			break;

		/* Unspecified selectors in @cfg act as wildcards. */
		if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
		    (cfg->fc_scope == RT_SCOPE_NOWHERE ||
		     fa->fa_scope == cfg->fc_scope) &&
		    (!cfg->fc_protocol ||
		     fi->fib_protocol == cfg->fc_protocol) &&
		    fib_nh_match(cfg, fi) == 0) {
			fa_to_delete = fa;
			break;
		}
	}

	if (!fa_to_delete)
		return -ESRCH;

	fa = fa_to_delete;
	rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
		  &cfg->fc_nlinfo, 0);

	l = fib_find_node(t, key);
	li = find_leaf_info(l, plen);

	list_del_rcu(&fa->fa_list);

	if (list_empty(fa_head)) {
		hlist_del_rcu(&li->hlist);
		free_leaf_info(li);
	}

	if (hlist_empty(&l->list))
		trie_leaf_remove(t, l);

	if (fa->fa_state & FA_S_ACCESSED)
		rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);

	fib_release_info(fa->fa_info);
	alias_free_mem_rcu(fa);
	return 0;
}
/*
 * trie_flush_list - remove all aliases on @head whose fib_info is dead.
 *
 * Returns the number of aliases removed; freeing is RCU-deferred so
 * concurrent readers remain safe.
 */
static int trie_flush_list(struct list_head *head)
{
	struct fib_alias *fa, *fa_node;
	int found = 0;

	list_for_each_entry_safe(fa, fa_node, head, fa_list) {
		struct fib_info *fi = fa->fa_info;

		if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
			list_del_rcu(&fa->fa_list);
			fib_release_info(fa->fa_info);
			alias_free_mem_rcu(fa);
			found++;
		}
	}
	return found;
}
/*
 * trie_flush_leaf - flush dead aliases from every prefix stored in @l,
 * discarding leaf_info entries that end up empty.  Returns the total
 * number of aliases removed.
 */
static int trie_flush_leaf(struct leaf *l)
{
	int found = 0;
	struct hlist_head *lih = &l->list;
	struct hlist_node *node, *tmp;
	struct leaf_info *li = NULL;

	hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
		found += trie_flush_list(&li->falh);

		if (list_empty(&li->falh)) {
			hlist_del_rcu(&li->hlist);
			free_leaf_info(li);
		}
	}
	return found;
}
1765 * Scan for the next right leaf starting at node p->child[idx]
1766 * Since we have back pointer, no recursion necessary.
1768 static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
1770 do {
1771 t_key idx;
1773 if (c)
1774 idx = tkey_extract_bits(c->key, p->pos, p->bits) + 1;
1775 else
1776 idx = 0;
1778 while (idx < 1u << p->bits) {
1779 c = tnode_get_child_rcu(p, idx++);
1780 if (!c)
1781 continue;
1783 if (IS_LEAF(c)) {
1784 prefetch(p->child[idx]);
1785 return (struct leaf *) c;
1788 /* Rescan start scanning in new node */
1789 p = (struct tnode *) c;
1790 idx = 0;
1793 /* Node empty, walk back up to parent */
1794 c = (struct node *) p;
1795 } while ( (p = node_parent_rcu(c)) != NULL);
1797 return NULL; /* Root of trie */
1800 static struct leaf *trie_firstleaf(struct trie *t)
1802 struct tnode *n = (struct tnode *) rcu_dereference(t->trie);
1804 if (!n)
1805 return NULL;
1807 if (IS_LEAF(n)) /* trie is just a leaf */
1808 return (struct leaf *) n;
1810 return leaf_walk_rcu(n, NULL);
1813 static struct leaf *trie_nextleaf(struct leaf *l)
1815 struct node *c = (struct node *) l;
1816 struct tnode *p = node_parent_rcu(c);
1818 if (!p)
1819 return NULL; /* trie with just one leaf */
1821 return leaf_walk_rcu(p, c);
1824 static struct leaf *trie_leafindex(struct trie *t, int index)
1826 struct leaf *l = trie_firstleaf(t);
1828 while (l && index-- > 0)
1829 l = trie_nextleaf(l);
1831 return l;
/*
 * Caller must hold RTNL.
 */
/*
 * fn_trie_flush - purge every route whose fib_info is marked dead.
 *
 * The previous leaf (ll) is only removed after we have advanced past it,
 * because trie_leaf_remove() would invalidate the iteration cursor.
 * Returns the number of aliases flushed.
 */
static int fn_trie_flush(struct fib_table *tb)
{
	struct trie *t = (struct trie *) tb->tb_data;
	struct leaf *l, *ll = NULL;
	int found = 0;

	for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
		found += trie_flush_leaf(l);

		if (ll && hlist_empty(&ll->list))
			trie_leaf_remove(t, ll);
		ll = l;
	}

	/* Handle the last leaf visited. */
	if (ll && hlist_empty(&ll->list))
		trie_leaf_remove(t, ll);

	pr_debug("trie_flush found=%d\n", found);
	return found;
}
/*
 * fn_trie_select_default - pick a usable default (0/0) route.
 *
 * Scans the aliases of the 0.0.0.0/0 leaf for unicast gateways in the
 * requested scope, skipping dead nexthops via fib_detect_death(), and
 * assigns the chosen fib_info into @res.  tb->tb_default caches the
 * position of the current choice between calls.
 */
static void fn_trie_select_default(struct fib_table *tb,
				   const struct flowi *flp,
				   struct fib_result *res)
{
	struct trie *t = (struct trie *) tb->tb_data;
	int order, last_idx;
	struct fib_info *fi = NULL;
	struct fib_info *last_resort;
	struct fib_alias *fa = NULL;
	struct list_head *fa_head;
	struct leaf *l;

	last_idx = -1;
	last_resort = NULL;
	order = -1;

	rcu_read_lock();

	/* The default route lives in the leaf with key 0 (0.0.0.0/0). */
	l = fib_find_node(t, 0);
	if (!l)
		goto out;

	fa_head = get_fa_head(l, 0);
	if (!fa_head)
		goto out;

	if (list_empty(fa_head))
		goto out;

	list_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (fa->fa_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;

		if (next_fi->fib_priority > res->fi->fib_priority)
			break;
		/* Only directly reachable gateways qualify. */
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;
		fa->fa_state |= FA_S_ACCESSED;

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, tb->tb_default)) {
			fib_result_assign(res, fi);
			tb->tb_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}
	if (order <= 0 || fi == NULL) {
		tb->tb_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
				tb->tb_default)) {
		fib_result_assign(res, fi);
		tb->tb_default = order;
		goto out;
	}
	/* Everything looked dead: fall back to the last resort, if any. */
	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	tb->tb_default = last_idx;
out:
	rcu_read_unlock();
}
/*
 * fn_trie_dump_fa - dump one alias list (one prefix) to a netlink skb.
 *
 * cb->args[5] stores the alias index to resume from after a partial
 * dump.  Returns skb->len on completion or -1 when the skb filled up.
 */
static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
			   struct fib_table *tb,
			   struct sk_buff *skb, struct netlink_callback *cb)
{
	int i, s_i;
	struct fib_alias *fa;
	__be32 xkey = htonl(key);

	s_i = cb->args[5];
	i = 0;

	/* rcu_read_lock is hold by caller */

	list_for_each_entry_rcu(fa, fah, fa_list) {
		/* Skip entries already delivered in a previous pass. */
		if (i < s_i) {
			i++;
			continue;
		}

		if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq,
				  RTM_NEWROUTE,
				  tb->tb_id,
				  fa->fa_type,
				  fa->fa_scope,
				  xkey,
				  plen,
				  fa->fa_tos,
				  fa->fa_info, NLM_F_MULTI) < 0) {
			cb->args[5] = i;
			return -1;
		}
		i++;
	}
	cb->args[5] = i;
	return skb->len;
}
/*
 * fn_trie_dump_leaf - dump all prefixes stored in leaf @l.
 *
 * cb->args[4] stores the leaf_info index to resume from; args[5] is
 * reset whenever we move past the resume point so the inner dump starts
 * from its beginning.  Returns skb->len or -1 when the skb filled up.
 */
static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
			struct sk_buff *skb, struct netlink_callback *cb)
{
	struct leaf_info *li;
	struct hlist_node *node;
	int i, s_i;

	s_i = cb->args[4];
	i = 0;

	/* rcu_read_lock is hold by caller */
	hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
		if (i < s_i) {
			i++;
			continue;
		}

		if (i > s_i)
			cb->args[5] = 0;

		if (list_empty(&li->falh))
			continue;

		if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
			cb->args[4] = i;
			return -1;
		}
		i++;
	}

	cb->args[4] = i;
	return skb->len;
}
/*
 * fn_trie_dump - netlink dump entry point for a whole table.
 *
 * Resumes at the leaf key cached in cb->args[2]; if that leaf has been
 * deleted in the meantime, falls back to counting leaves (args[3]).
 * Per-leaf resume state in args[4..] is cleared after each leaf.
 */
static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb,
			struct netlink_callback *cb)
{
	struct leaf *l;
	struct trie *t = (struct trie *) tb->tb_data;
	t_key key = cb->args[2];
	int count = cb->args[3];

	rcu_read_lock();
	/* Dump starting at last key.
	 * Note: 0.0.0.0/0 (ie default) is first key.
	 */
	if (count == 0)
		l = trie_firstleaf(t);
	else {
		/* Normally, continue from last key, but if that is missing
		 * fallback to using slow rescan
		 */
		l = fib_find_node(t, key);
		if (!l)
			l = trie_leafindex(t, count);
	}

	while (l) {
		cb->args[2] = l->key;
		if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
			cb->args[3] = count;
			rcu_read_unlock();
			return -1;
		}

		++count;
		l = trie_nextleaf(l);
		/* Forget the per-leaf resume state before the next leaf. */
		memset(&cb->args[4], 0,
		       sizeof(cb->args) - 4*sizeof(cb->args[0]));
	}
	cb->args[3] = count;
	rcu_read_unlock();

	return skb->len;
}
/* Create the slab caches for fib aliases and trie leaves; SLAB_PANIC
 * makes allocation failure fatal at boot, so no error handling needed. */
void __init fib_hash_init(void)
{
	fn_alias_kmem = kmem_cache_create("ip_fib_alias",
					  sizeof(struct fib_alias),
					  0, SLAB_PANIC, NULL);

	/* One cache serves both leaf flavours; size for the larger. */
	trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
					   max(sizeof(struct leaf),
					       sizeof(struct leaf_info)),
					   0, SLAB_PANIC, NULL);
}
/* Fix more generic FIB names for init later */
/*
 * fib_hash_table - allocate and initialise one FIB table backed by a trie.
 *
 * The trie is embedded in tb->tb_data (flexible trailing storage).
 * Returns NULL on allocation failure.
 */
struct fib_table *fib_hash_table(u32 id)
{
	struct fib_table *tb;
	struct trie *t;

	tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
		     GFP_KERNEL);
	if (tb == NULL)
		return NULL;

	tb->tb_id = id;
	tb->tb_default = -1;
	tb->tb_lookup = fn_trie_lookup;
	tb->tb_insert = fn_trie_insert;
	tb->tb_delete = fn_trie_delete;
	tb->tb_flush = fn_trie_flush;
	tb->tb_select_default = fn_trie_select_default;
	tb->tb_dump = fn_trie_dump;

	t = (struct trie *) tb->tb_data;
	memset(t, 0, sizeof(*t));

	/* Announce once; the local table is the first one created. */
	if (id == RT_TABLE_LOCAL)
		pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION);

	return tb;
}
2088 #ifdef CONFIG_PROC_FS
/* Depth first Trie walk iterator */
struct fib_trie_iter {
	struct seq_net_private p;	/* must be first for seq_file_net() */
	struct fib_table *tb;		/* table currently being walked */
	struct tnode *tnode;		/* tnode whose children we are scanning */
	unsigned index;			/* next child index within tnode */
	unsigned depth;			/* current depth, for indentation */
};
/*
 * fib_trie_get_next - advance the depth-first walk by one node.
 *
 * Returns the next node (leaf or tnode) in pre-order, descending into
 * tnodes and popping back to the parent when a node's children are
 * exhausted; NULL when the walk is complete.  Must run under RCU.
 */
static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
{
	struct tnode *tn = iter->tnode;
	unsigned cindex = iter->index;
	struct tnode *p;

	/* A single entry routing table */
	if (!tn)
		return NULL;

	pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
		 iter->tnode, iter->index, iter->depth);
rescan:
	while (cindex < (1<<tn->bits)) {
		struct node *n = tnode_get_child_rcu(tn, cindex);

		if (n) {
			if (IS_LEAF(n)) {
				/* Stay on this tnode, resume after this child. */
				iter->tnode = tn;
				iter->index = cindex + 1;
			} else {
				/* push down one level */
				iter->tnode = (struct tnode *) n;
				iter->index = 0;
				++iter->depth;
			}
			return n;
		}

		++cindex;
	}

	/* Current node exhausted, pop back up */
	p = node_parent_rcu((struct node *)tn);
	if (p) {
		cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
		tn = p;
		--iter->depth;
		goto rescan;
	}

	/* got root? */
	return NULL;
}
/*
 * fib_trie_get_first - position @iter at the root of trie @t.
 *
 * Returns the root node, or NULL for a missing/empty trie.  A leaf-only
 * trie leaves iter->tnode NULL so fib_trie_get_next() terminates.
 */
static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
				       struct trie *t)
{
	struct node *n;

	if (!t)
		return NULL;

	n = rcu_dereference(t->trie);
	if (!n)
		return NULL;

	if (IS_TNODE(n)) {
		iter->tnode = (struct tnode *) n;
		iter->index = 0;
		iter->depth = 1;
	} else {
		iter->tnode = NULL;
		iter->index = 0;
		iter->depth = 0;
	}

	return n;
}
/*
 * trie_collect_stats - walk the whole trie and fill @s with structural
 * statistics (leaf/tnode counts, depth, node-size histogram, unused
 * child slots).  Takes rcu_read_lock internally.
 */
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
	struct node *n;
	struct fib_trie_iter iter;

	memset(s, 0, sizeof(*s));

	rcu_read_lock();
	for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
		if (IS_LEAF(n)) {
			struct leaf *l = (struct leaf *)n;
			struct leaf_info *li;
			struct hlist_node *tmp;

			s->leaves++;
			s->totdepth += iter.depth;
			if (iter.depth > s->maxdepth)
				s->maxdepth = iter.depth;

			/* One prefix per leaf_info hanging off the leaf. */
			hlist_for_each_entry_rcu(li, tmp, &l->list, hlist)
				++s->prefixes;
		} else {
			const struct tnode *tn = (const struct tnode *) n;
			int i;

			s->tnodes++;
			if (tn->bits < MAX_STAT_DEPTH)
				s->nodesizes[tn->bits]++;

			for (i = 0; i < (1<<tn->bits); i++)
				if (!tn->child[i])
					s->nullpointers++;
		}
	}
	rcu_read_unlock();
}
2206 * This outputs /proc/net/fib_triestats
2208 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2210 unsigned i, max, pointers, bytes, avdepth;
2212 if (stat->leaves)
2213 avdepth = stat->totdepth*100 / stat->leaves;
2214 else
2215 avdepth = 0;
2217 seq_printf(seq, "\tAver depth: %u.%02d\n",
2218 avdepth / 100, avdepth % 100);
2219 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2221 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2222 bytes = sizeof(struct leaf) * stat->leaves;
2224 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
2225 bytes += sizeof(struct leaf_info) * stat->prefixes;
2227 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
2228 bytes += sizeof(struct tnode) * stat->tnodes;
2230 max = MAX_STAT_DEPTH;
2231 while (max > 0 && stat->nodesizes[max-1] == 0)
2232 max--;
2234 pointers = 0;
2235 for (i = 1; i <= max; i++)
2236 if (stat->nodesizes[i] != 0) {
2237 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
2238 pointers += (1<<i) * stat->nodesizes[i];
2240 seq_putc(seq, '\n');
2241 seq_printf(seq, "\tPointers: %u\n", pointers);
2243 bytes += sizeof(struct node *) * pointers;
2244 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
2245 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
#ifdef CONFIG_IP_FIB_TRIE_STATS
/* Print the lookup counters accumulated in struct trie_use_stats. */
static void trie_show_usage(struct seq_file *seq,
			    const struct trie_use_stats *stats)
{
	seq_printf(seq, "\nCounters:\n---------\n");
	seq_printf(seq, "gets = %u\n", stats->gets);
	seq_printf(seq, "backtracks = %u\n", stats->backtrack);
	seq_printf(seq, "semantic match passed = %u\n",
		   stats->semantic_match_passed);
	seq_printf(seq, "semantic match miss = %u\n",
		   stats->semantic_match_miss);
	seq_printf(seq, "null node hit= %u\n", stats->null_node_hit);
	seq_printf(seq, "skipped node resize = %u\n\n",
		   stats->resize_node_skipped);
}
#endif /*  CONFIG_IP_FIB_TRIE_STATS */
2265 static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
2267 if (tb->tb_id == RT_TABLE_LOCAL)
2268 seq_puts(seq, "Local:\n");
2269 else if (tb->tb_id == RT_TABLE_MAIN)
2270 seq_puts(seq, "Main:\n");
2271 else
2272 seq_printf(seq, "Id %d:\n", tb->tb_id);
/*
 * fib_triestat_seq_show - /proc/net/fib_triestats show callback.
 *
 * Walks every FIB table in the namespace and prints structural and
 * (optionally) usage statistics for each trie.
 */
static int fib_triestat_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	unsigned int h;

	seq_printf(seq,
		   "Basic info: size of leaf:"
		   " %Zd bytes, size of tnode: %Zd bytes.\n",
		   sizeof(struct leaf), sizeof(struct tnode));

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct hlist_node *node;
		struct fib_table *tb;

		hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
			struct trie *t = (struct trie *) tb->tb_data;
			struct trie_stat stat;

			if (!t)
				continue;

			fib_table_print(seq, tb);

			trie_collect_stats(t, &stat);
			trie_show_stats(seq, &stat);
#ifdef CONFIG_IP_FIB_TRIE_STATS
			trie_show_usage(seq, &t->stats);
#endif
		}
	}

	return 0;
}
/* open() for /proc/net/fib_triestats: single-shot, namespace aware. */
static int fib_triestat_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, fib_triestat_seq_show);
}
/* File operations for /proc/net/fib_triestats. */
static const struct file_operations fib_triestat_fops = {
	.owner	= THIS_MODULE,
	.open	= fib_triestat_seq_open,
	.read	= seq_read,
	.llseek	= seq_lseek,
	.release = single_release_net,
};
/*
 * fib_trie_get_idx - find the @pos-th node across all tables.
 *
 * Linearly walks every table's trie until the running index matches
 * @pos; remembers the owning table in iter->tb.  Returns NULL when
 * @pos is past the end.
 */
static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	loff_t idx = 0;
	unsigned int h;

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct hlist_node *node;
		struct fib_table *tb;

		hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
			struct node *n;

			for (n = fib_trie_get_first(iter,
						    (struct trie *) tb->tb_data);
			     n; n = fib_trie_get_next(iter))
				if (pos == idx++) {
					iter->tb = tb;
					return n;
				}
		}
	}

	return NULL;
}
/* seq_file start: take the RCU read lock for the duration of the walk. */
static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return fib_trie_get_idx(seq, *pos);
}
/*
 * fib_trie_seq_next - advance to the next node, crossing table and hash
 * chain boundaries when the current trie is exhausted.
 */
static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct fib_table *tb = iter->tb;
	struct hlist_node *tb_node;
	unsigned int h;
	struct node *n;

	++*pos;
	/* next node in same table */
	n = fib_trie_get_next(iter);
	if (n)
		return n;

	/* walk rest of this hash chain */
	h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
	while ( (tb_node = rcu_dereference(tb->tb_hlist.next)) ) {
		tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
		n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
		if (n)
			goto found;
	}

	/* new hash chain */
	while (++h < FIB_TABLE_HASHSZ) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) {
			n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
			if (n)
				goto found;
		}
	}
	return NULL;

found:
	iter->tb = tb;
	return n;
}
/* seq_file stop: drop the RCU read lock taken in fib_trie_seq_start(). */
static void fib_trie_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
/* Emit @n indentation units into @seq. */
static void seq_indent(struct seq_file *seq, int n)
{
	for (; n > 0; n--)
		seq_puts(seq, " ");
}
/* Return a printable name for route scope @s; unknown scopes are
 * formatted into @buf, which must stay valid while the result is used. */
static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
{
	switch (s) {
	case RT_SCOPE_UNIVERSE: return "universe";
	case RT_SCOPE_SITE:	return "site";
	case RT_SCOPE_LINK:	return "link";
	case RT_SCOPE_HOST:	return "host";
	case RT_SCOPE_NOWHERE:	return "nowhere";
	default:
		snprintf(buf, len, "scope=%d", s);
		return buf;
	}
}
/* Printable names for the RTN_* route types, indexed by type value. */
static const char *const rtn_type_names[__RTN_MAX] = {
	[RTN_UNSPEC] = "UNSPEC",
	[RTN_UNICAST] = "UNICAST",
	[RTN_LOCAL] = "LOCAL",
	[RTN_BROADCAST] = "BROADCAST",
	[RTN_ANYCAST] = "ANYCAST",
	[RTN_MULTICAST] = "MULTICAST",
	[RTN_BLACKHOLE] = "BLACKHOLE",
	[RTN_UNREACHABLE] = "UNREACHABLE",
	[RTN_PROHIBIT] = "PROHIBIT",
	[RTN_THROW] = "THROW",
	[RTN_NAT] = "NAT",
	[RTN_XRESOLVE] = "XRESOLVE",
};
2439 static inline const char *rtn_type(char *buf, size_t len, unsigned t)
2441 if (t < __RTN_MAX && rtn_type_names[t])
2442 return rtn_type_names[t];
2443 snprintf(buf, len, "type %u", t);
2444 return buf;
/* Pretty print the trie */
/*
 * fib_trie_seq_show - /proc/net/fib_trie show callback.
 *
 * Tnodes print as "+--" entries with position/bits/child counts; leaves
 * print as "|--" with every prefix/alias indented beneath them.
 */
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
	const struct fib_trie_iter *iter = seq->private;
	struct node *n = v;

	/* A node with no parent is a trie root: print its table header. */
	if (!node_parent_rcu(n))
		fib_table_print(seq, iter->tb);

	if (IS_TNODE(n)) {
		struct tnode *tn = (struct tnode *) n;
		__be32 prf = htonl(mask_pfx(tn->key, tn->pos));

		seq_indent(seq, iter->depth-1);
		seq_printf(seq, " +-- %pI4/%d %d %d %d\n",
			   &prf, tn->pos, tn->bits, tn->full_children,
			   tn->empty_children);

	} else {
		struct leaf *l = (struct leaf *) n;
		struct leaf_info *li;
		struct hlist_node *node;
		__be32 val = htonl(l->key);

		seq_indent(seq, iter->depth);
		seq_printf(seq, " |-- %pI4\n", &val);

		hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
			struct fib_alias *fa;

			list_for_each_entry_rcu(fa, &li->falh, fa_list) {
				char buf1[32], buf2[32];

				seq_indent(seq, iter->depth+1);
				seq_printf(seq, " /%d %s %s", li->plen,
					   rtn_scope(buf1, sizeof(buf1),
						     fa->fa_scope),
					   rtn_type(buf2, sizeof(buf2),
						    fa->fa_type));
				if (fa->fa_tos)
					seq_printf(seq, " tos=%d", fa->fa_tos);
				seq_putc(seq, '\n');
			}
		}
	}

	return 0;
}
/* Iterator callbacks for /proc/net/fib_trie. */
static const struct seq_operations fib_trie_seq_ops = {
	.start  = fib_trie_seq_start,
	.next   = fib_trie_seq_next,
	.stop   = fib_trie_seq_stop,
	.show   = fib_trie_seq_show,
};
/* open() for /proc/net/fib_trie: per-open iterator state, namespace aware. */
static int fib_trie_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_trie_seq_ops,
			    sizeof(struct fib_trie_iter));
}
/* File operations for /proc/net/fib_trie. */
static const struct file_operations fib_trie_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_trie_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
/* Iterator state for /proc/net/route (main table leaves only). */
struct fib_route_iter {
	struct seq_net_private p;	/* must be first for seq_file_net() */
	struct trie *main_trie;		/* RT_TABLE_MAIN trie being dumped */
	loff_t	pos;			/* leaf index of the cached position */
	t_key	key;			/* key of the last leaf returned */
};
2524 static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
2526 struct leaf *l = NULL;
2527 struct trie *t = iter->main_trie;
2529 /* use cache location of last found key */
2530 if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
2531 pos -= iter->pos;
2532 else {
2533 iter->pos = 0;
2534 l = trie_firstleaf(t);
2537 while (l && pos-- > 0) {
2538 iter->pos++;
2539 l = trie_nextleaf(l);
2542 if (l)
2543 iter->key = pos; /* remember it */
2544 else
2545 iter->pos = 0; /* forget it */
2547 return l;
/*
 * seq_file start for /proc/net/route: locate the main table and return
 * the header token at position 0, otherwise the (pos-1)-th leaf.
 */
static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct fib_route_iter *iter = seq->private;
	struct fib_table *tb;

	rcu_read_lock();
	tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
	if (!tb)
		return NULL;

	iter->main_trie = (struct trie *) tb->tb_data;
	if (*pos == 0)
		return SEQ_START_TOKEN;
	else
		return fib_route_get_idx(iter, *pos - 1);
}
/* seq_file next: advance one leaf, keeping the (pos, key) cursor cache
 * in sync for fib_route_get_idx(). */
static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_route_iter *iter = seq->private;
	struct leaf *l = v;

	++*pos;
	if (v == SEQ_START_TOKEN) {
		iter->pos = 0;
		l = trie_firstleaf(iter->main_trie);
	} else {
		iter->pos++;
		l = trie_nextleaf(l);
	}

	if (l)
		iter->key = l->key;
	else
		iter->pos = 0;
	return l;
}
/* seq_file ->stop: drop the RCU read lock taken in fib_route_seq_start(). */
static void fib_route_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
2595 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2597 static unsigned type2flags[RTN_MAX + 1] = {
2598 [7] = RTF_REJECT, [8] = RTF_REJECT,
2600 unsigned flags = type2flags[type];
2602 if (fi && fi->fib_nh->nh_gw)
2603 flags |= RTF_GATEWAY;
2604 if (mask == htonl(0xFFFFFFFF))
2605 flags |= RTF_HOST;
2606 flags |= RTF_UP;
2607 return flags;
/*
 *	This outputs /proc/net/route.
 *	The format of the file is not supposed to be changed
 *	and needs to be the same as the fib_hash output to avoid
 *	breaking legacy utilities.
 */
/*
 * seq_file ->show for /proc/net/route: print the header line for
 * SEQ_START_TOKEN, otherwise one line per (prefix-length, alias) pair
 * hanging off the leaf v.  The exact column layout (including the
 * 127-char space padding via %n) must match the old fib_hash output.
 */
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
	struct leaf *l = v;
	struct leaf_info *li;
	struct hlist_node *node;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
			   "\tWindow\tIRTT");
		return 0;
	}

	/* one leaf_info per prefix length stored at this key */
	hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
		struct fib_alias *fa;
		__be32 mask, prefix;

		mask = inet_make_mask(li->plen);
		prefix = htonl(l->key);

		list_for_each_entry_rcu(fa, &li->falh, fa_list) {
			const struct fib_info *fi = fa->fa_info;
			unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
			int len;	/* chars written so far, via %n */

			/* broadcast/multicast entries were never listed
			 * by fib_hash; keep them hidden here too */
			if (fa->fa_type == RTN_BROADCAST
			    || fa->fa_type == RTN_MULTICAST)
				continue;

			if (fi)
				seq_printf(seq,
					 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
					 "%d\t%08X\t%d\t%u\t%u%n",
					 fi->fib_dev ? fi->fib_dev->name : "*",
					 prefix,
					 fi->fib_nh->nh_gw, flags, 0, 0,
					 fi->fib_priority,
					 mask,
					 /* advmss excludes the 40-byte
					  * TCP/IP header; add it back */
					 (fi->fib_advmss ?
					  fi->fib_advmss + 40 : 0),
					 fi->fib_window,
					 fi->fib_rtt >> 3, &len);
			else
				seq_printf(seq,
					 "*\t%08X\t%08X\t%04X\t%d\t%u\t"
					 "%d\t%08X\t%d\t%u\t%u%n",
					 prefix, 0, flags, 0, 0, 0,
					 mask, 0, 0, 0, &len);

			/* space-pad every line to 127 chars + newline */
			seq_printf(seq, "%*s\n", 127 - len, "");
		}
	}

	return 0;
}
/* seq_file callbacks for /proc/net/route (legacy routing table dump). */
static const struct seq_operations fib_route_seq_ops = {
	.start  = fib_route_seq_start,
	.next   = fib_route_seq_next,
	.stop   = fib_route_seq_stop,
	.show   = fib_route_seq_show,
};
2679 static int fib_route_seq_open(struct inode *inode, struct file *file)
2681 return seq_open_net(inode, file, &fib_route_seq_ops,
2682 sizeof(struct fib_route_iter));
/* file_operations for /proc/net/route; reads go through seq_file. */
static const struct file_operations fib_route_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_route_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,	/* frees the fib_route_iter */
};
2693 int __net_init fib_proc_init(struct net *net)
2695 if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
2696 goto out1;
2698 if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
2699 &fib_triestat_fops))
2700 goto out2;
2702 if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
2703 goto out3;
2705 return 0;
2707 out3:
2708 proc_net_remove(net, "fib_triestat");
2709 out2:
2710 proc_net_remove(net, "fib_trie");
2711 out1:
2712 return -ENOMEM;
/* Remove the per-namespace proc entries created by fib_proc_init(). */
void __net_exit fib_proc_exit(struct net *net)
{
	proc_net_remove(net, "fib_trie");
	proc_net_remove(net, "fib_triestat");
	proc_net_remove(net, "route");
}
2722 #endif /* CONFIG_PROC_FS */