net/ipv4/fib_trie.c
1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
28 * Code from fib_hash has been reused which includes the following header:
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
35 * IPv4 FIB: lookup engine and maintenance routines.
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
45 * Substantial contributions to this work come from:
47 * David S. Miller, <davem@davemloft.net>
48 * Stephen Hemminger <shemminger@osdl.org>
49 * Paul E. McKenney <paulmck@us.ibm.com>
50 * Patrick McHardy <kaber@trash.net>
53 #define VERSION "0.408"
55 #include <asm/uaccess.h>
56 #include <asm/system.h>
57 #include <linux/bitops.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
60 #include <linux/mm.h>
61 #include <linux/string.h>
62 #include <linux/socket.h>
63 #include <linux/sockios.h>
64 #include <linux/errno.h>
65 #include <linux/in.h>
66 #include <linux/inet.h>
67 #include <linux/inetdevice.h>
68 #include <linux/netdevice.h>
69 #include <linux/if_arp.h>
70 #include <linux/proc_fs.h>
71 #include <linux/rcupdate.h>
72 #include <linux/skbuff.h>
73 #include <linux/netlink.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <net/net_namespace.h>
77 #include <net/ip.h>
78 #include <net/protocol.h>
79 #include <net/route.h>
80 #include <net/tcp.h>
81 #include <net/sock.h>
82 #include <net/ip_fib.h>
83 #include "fib_lookup.h"
85 #define MAX_STAT_DEPTH 32
87 #define KEYLENGTH (8*sizeof(t_key))
89 typedef unsigned int t_key;
91 #define T_TNODE 0
92 #define T_LEAF 1
93 #define NODE_TYPE_MASK 0x1UL
94 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
96 #define IS_TNODE(n) (!(n->parent & T_LEAF))
97 #define IS_LEAF(n) (n->parent & T_LEAF)
99 struct node {
100 unsigned long parent;
101 t_key key;
104 struct leaf {
105 unsigned long parent;
106 t_key key;
107 struct hlist_head list;
108 struct rcu_head rcu;
111 struct leaf_info {
112 struct hlist_node hlist;
113 struct rcu_head rcu;
114 int plen;
115 struct list_head falh;
118 struct tnode {
119 unsigned long parent;
120 t_key key;
121 unsigned char pos; /* 2log(KEYLENGTH) bits needed */
122 unsigned char bits; /* 2log(KEYLENGTH) bits needed */
123 unsigned int full_children; /* KEYLENGTH bits needed */
124 unsigned int empty_children; /* KEYLENGTH bits needed */
125 struct rcu_head rcu;
126 struct node *child[0];
129 #ifdef CONFIG_IP_FIB_TRIE_STATS
130 struct trie_use_stats {
131 unsigned int gets;
132 unsigned int backtrack;
133 unsigned int semantic_match_passed;
134 unsigned int semantic_match_miss;
135 unsigned int null_node_hit;
136 unsigned int resize_node_skipped;
138 #endif
140 struct trie_stat {
141 unsigned int totdepth;
142 unsigned int maxdepth;
143 unsigned int tnodes;
144 unsigned int leaves;
145 unsigned int nullpointers;
146 unsigned int prefixes;
147 unsigned int nodesizes[MAX_STAT_DEPTH];
150 struct trie {
151 struct node *trie;
152 #ifdef CONFIG_IP_FIB_TRIE_STATS
153 struct trie_use_stats stats;
154 #endif
157 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
158 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
159 int wasfull);
160 static struct node *resize(struct trie *t, struct tnode *tn);
161 static struct tnode *inflate(struct trie *t, struct tnode *tn);
162 static struct tnode *halve(struct trie *t, struct tnode *tn);
163 static void tnode_free(struct tnode *tn);
165 static struct kmem_cache *fn_alias_kmem __read_mostly;
166 static struct kmem_cache *trie_leaf_kmem __read_mostly;
168 static inline struct tnode *node_parent(struct node *node)
170 return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
173 static inline struct tnode *node_parent_rcu(struct node *node)
175 struct tnode *ret = node_parent(node);
177 return rcu_dereference(ret);
180 /* Same as rcu_assign_pointer
181 * but that macro assumes the value is a pointer.
183 static inline void node_set_parent(struct node *node, struct tnode *ptr)
185 smp_wmb();
186 node->parent = (unsigned long)ptr | NODE_TYPE(node);
189 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
191 BUG_ON(i >= 1U << tn->bits);
193 return tn->child[i];
196 static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
198 struct node *ret = tnode_get_child(tn, i);
200 return rcu_dereference(ret);
203 static inline int tnode_child_length(const struct tnode *tn)
205 return 1 << tn->bits;
208 static inline t_key mask_pfx(t_key k, unsigned short l)
210 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
213 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
215 if (offset < KEYLENGTH)
216 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
217 else
218 return 0;
221 static inline int tkey_equals(t_key a, t_key b)
223 return a == b;
226 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
228 if (bits == 0 || offset >= KEYLENGTH)
229 return 1;
230 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
231 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
234 static inline int tkey_mismatch(t_key a, int offset, t_key b)
236 t_key diff = a ^ b;
237 int i = offset;
239 if (!diff)
240 return 0;
241 while ((diff << i) >> (KEYLENGTH-1) == 0)
242 i++;
243 return i;
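/*
 * Worked example (added for illustration, not part of the original
 * source).  With KEYLENGTH == 32 and key = 0xC0A80100 (192.168.1.0):
 *
 *   tkey_extract_bits(0xC0A80100, 0, 8)  == 0xC0   (top octet)
 *   tkey_extract_bits(0xC0A80100, 8, 8)  == 0xA8   (second octet)
 *   mask_pfx(0xC0A80101, 24)             == 0xC0A80100
 *   tkey_mismatch(0xC0A80100, 0, 0xC0A80000) == 23, the MSB-first index
 *       of the first bit in which the two keys differ.
 */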
247 To understand this stuff, an understanding of keys and all their bits is
248 necessary. Every node in the trie has a key associated with it, but not
249 all of the bits in that key are significant.
251 Consider a node 'n' and its parent 'tp'.
253 If n is a leaf, every bit in its key is significant. Its presence is
254 necessitated by path compression, since during a tree traversal (when
255 searching for a leaf - unless we are doing an insertion) we will completely
256 ignore all skipped bits we encounter. Thus we need to verify, at the end of
257 a potentially successful search, that we have indeed been walking the
258 correct key path.
260 Note that if the correct key is present in the trie, we can never "miss" it by
261 following the wrong path. Path compression ensures that segments of the key
262 that are the same for all keys with a given prefix are skipped, but the
263 skipped part *is* identical for each node in the subtrie below the skipped
264 bit! trie_insert() in this implementation takes care of that - note the
265 call to tkey_sub_equals() in trie_insert().
267 If n is an internal node - a 'tnode' here - the various parts of its key
268 have many different meanings.
270 Example:
271 _________________________________________________________________
272 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
273 -----------------------------------------------------------------
274 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
276 _________________________________________________________________
277 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
278 -----------------------------------------------------------------
279 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
281 tp->pos = 7
282 tp->bits = 3
283 n->pos = 15
284 n->bits = 4
286 First, let's just ignore the bits that come before the parent tp, that is
287 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
288 not use them for anything.
290 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
291 index into the parent's child array. That is, they will be used to find
292 'n' among tp's children.
294 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
295 for the node n.
297 All the bits we have seen so far are significant to the node n. The rest
298 of the bits are really not needed or indeed known in n->key.
300 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
301 n's child array, and will of course be different for each child.
304 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
305 at this point.
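A compact recap of the example above (added for illustration, not part of the
original comment): with tp->pos = 7, tp->bits = 3, n->pos = 15 and n->bits = 4,
the parent finds n via

    cindex = tkey_extract_bits(n->key, 7, 3);    /* the "N" bits 7..9 */

the "S" bits 10..14 are skipped and only verified lazily against n->key, and n
in turn indexes its own 16-slot child array with

    tkey_extract_bits(key, 15, 4);               /* the "C" bits 15..18 */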
309 static inline void check_tnode(const struct tnode *tn)
311 WARN_ON(tn && tn->pos+tn->bits > 32);
314 static const int halve_threshold = 25;
315 static const int inflate_threshold = 50;
316 static const int halve_threshold_root = 8;
317 static const int inflate_threshold_root = 15;
320 static void __alias_free_mem(struct rcu_head *head)
322 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
323 kmem_cache_free(fn_alias_kmem, fa);
326 static inline void alias_free_mem_rcu(struct fib_alias *fa)
328 call_rcu(&fa->rcu, __alias_free_mem);
331 static void __leaf_free_rcu(struct rcu_head *head)
333 struct leaf *l = container_of(head, struct leaf, rcu);
334 kmem_cache_free(trie_leaf_kmem, l);
337 static void __leaf_info_free_rcu(struct rcu_head *head)
339 kfree(container_of(head, struct leaf_info, rcu));
342 static inline void free_leaf_info(struct leaf_info *leaf)
344 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
347 static struct tnode *tnode_alloc(size_t size)
349 struct page *pages;
351 if (size <= PAGE_SIZE)
352 return kzalloc(size, GFP_KERNEL);
354 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
355 if (!pages)
356 return NULL;
358 return page_address(pages);
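/*
 * Illustration (added, not in the original source), assuming 8-byte
 * pointers and PAGE_SIZE == 4096: a tnode with bits == 8 needs
 * sizeof(struct tnode) + 256 * 8, a little over 2 KB, and is served by
 * kzalloc(), while bits == 9 needs 512 * 8 = 4 KB plus the header,
 * which exceeds a page and falls back to alloc_pages().
 */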
361 static void __tnode_free_rcu(struct rcu_head *head)
363 struct tnode *tn = container_of(head, struct tnode, rcu);
364 size_t size = sizeof(struct tnode) +
365 (sizeof(struct node *) << tn->bits);
367 if (size <= PAGE_SIZE)
368 kfree(tn);
369 else
370 free_pages((unsigned long)tn, get_order(size));
373 static inline void tnode_free(struct tnode *tn)
375 if (IS_LEAF(tn)) {
376 struct leaf *l = (struct leaf *) tn;
377 call_rcu_bh(&l->rcu, __leaf_free_rcu);
378 } else
379 call_rcu(&tn->rcu, __tnode_free_rcu);
382 static struct leaf *leaf_new(void)
384 struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
385 if (l) {
386 l->parent = T_LEAF;
387 INIT_HLIST_HEAD(&l->list);
389 return l;
392 static struct leaf_info *leaf_info_new(int plen)
394 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
395 if (li) {
396 li->plen = plen;
397 INIT_LIST_HEAD(&li->falh);
399 return li;
402 static struct tnode *tnode_new(t_key key, int pos, int bits)
404 size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
405 struct tnode *tn = tnode_alloc(sz);
407 if (tn) {
408 tn->parent = T_TNODE;
409 tn->pos = pos;
410 tn->bits = bits;
411 tn->key = key;
412 tn->full_children = 0;
413 tn->empty_children = 1<<bits;
416 pr_debug("AT %p s=%u %lu\n", tn, (unsigned int) sizeof(struct tnode),
417 (unsigned long) (sizeof(struct node) << bits));
418 return tn;
422 * Check whether a tnode 'n' is "full", i.e. it is an internal node
423 * and no bits are skipped. See discussion in dyntree paper p. 6
426 static inline int tnode_full(const struct tnode *tn, const struct node *n)
428 if (n == NULL || IS_LEAF(n))
429 return 0;
431 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
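/*
 * Illustration (added): in the key layout example near the top of this
 * file, a child of tp is "full" only if it is a tnode whose own pos
 * equals tp->pos + tp->bits == 10, i.e. no "S" bits are skipped between
 * them; the node n shown there (pos 15) is therefore not full.
 */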
434 static inline void put_child(struct trie *t, struct tnode *tn, int i,
435 struct node *n)
437 tnode_put_child_reorg(tn, i, n, -1);
441 * Add a child at position i overwriting the old value.
442 * Update the value of full_children and empty_children.
445 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
446 int wasfull)
448 struct node *chi = tn->child[i];
449 int isfull;
451 BUG_ON(i >= 1<<tn->bits);
453 /* update emptyChildren */
454 if (n == NULL && chi != NULL)
455 tn->empty_children++;
456 else if (n != NULL && chi == NULL)
457 tn->empty_children--;
459 /* update fullChildren */
460 if (wasfull == -1)
461 wasfull = tnode_full(tn, chi);
463 isfull = tnode_full(tn, n);
464 if (wasfull && !isfull)
465 tn->full_children--;
466 else if (!wasfull && isfull)
467 tn->full_children++;
469 if (n)
470 node_set_parent(n, tn);
472 rcu_assign_pointer(tn->child[i], n);
475 static struct node *resize(struct trie *t, struct tnode *tn)
477 int i;
478 int err = 0;
479 struct tnode *old_tn;
480 int inflate_threshold_use;
481 int halve_threshold_use;
482 int max_resize;
484 if (!tn)
485 return NULL;
487 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
488 tn, inflate_threshold, halve_threshold);
490 /* No children */
491 if (tn->empty_children == tnode_child_length(tn)) {
492 tnode_free(tn);
493 return NULL;
495 /* One child */
496 if (tn->empty_children == tnode_child_length(tn) - 1)
497 for (i = 0; i < tnode_child_length(tn); i++) {
498 struct node *n;
500 n = tn->child[i];
501 if (!n)
502 continue;
504 /* compress one level */
505 node_set_parent(n, NULL);
506 tnode_free(tn);
507 return n;
510 * Double as long as the resulting node has a number of
511 * nonempty nodes that is above the threshold.
515 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
516 * the Helsinki University of Technology and Matti Tikkanen of Nokia
517 * Telecommunications, page 6:
518 * "A node is doubled if the ratio of non-empty children to all
519 * children in the *doubled* node is at least 'high'."
521 * 'high' in this instance is the variable 'inflate_threshold'. It
522 * is expressed as a percentage, so we multiply it with
523 * tnode_child_length() and instead of multiplying by 2 (since the
524 * child array will be doubled by inflate()) and multiplying
525 * the left-hand side by 100 (to handle the percentage thing) we
526 * multiply the left-hand side by 50.
528 * The left-hand side may look a bit weird: tnode_child_length(tn)
529 * - tn->empty_children is of course the number of non-null children
530 * in the current node. tn->full_children is the number of "full"
531 * children, that is non-null tnodes with a skip value of 0.
532 * All of those will be doubled in the resulting inflated tnode, so
533 * we just count them one extra time here.
535 * A clearer way to write this would be:
537 * to_be_doubled = tn->full_children;
538 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
539 * tn->full_children;
541 * new_child_length = tnode_child_length(tn) * 2;
543 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
544 * new_child_length;
545 * if (new_fill_factor >= inflate_threshold)
547 * ...and so on, though it would mess up the while () loop.
549 * anyway,
550 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
551 * inflate_threshold
553 * avoid a division:
554 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
555 * inflate_threshold * new_child_length
557 * expand not_to_be_doubled and to_be_doubled, and shorten:
558 * 100 * (tnode_child_length(tn) - tn->empty_children +
559 * tn->full_children) >= inflate_threshold * new_child_length
561 * expand new_child_length:
562 * 100 * (tnode_child_length(tn) - tn->empty_children +
563 * tn->full_children) >=
564 * inflate_threshold * tnode_child_length(tn) * 2
566 * shorten again:
567 * 50 * (tn->full_children + tnode_child_length(tn) -
568 * tn->empty_children) >= inflate_threshold *
569 * tnode_child_length(tn)
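/*
 * Numeric illustration (added, not part of the original comment): take
 * a tnode with bits = 4 (16 slots) holding 10 non-empty children of
 * which 6 are full, and inflate_threshold = 50.  Then
 *
 *   50 * (full + len - empty) = 50 * (6 + 16 - 6) = 800
 *   inflate_threshold * len   = 50 * 16           = 800
 *
 * so the condition holds and the node is doubled.  Equivalently, the
 * doubled node would have 4 + 2*6 = 16 of its 32 slots non-empty, a
 * fill factor of exactly the 50% threshold.
 */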
573 check_tnode(tn);
575 /* Keep root node larger */
577 if (!tn->parent)
578 inflate_threshold_use = inflate_threshold_root;
579 else
580 inflate_threshold_use = inflate_threshold;
582 err = 0;
583 max_resize = 10;
584 while ((tn->full_children > 0 && max_resize-- &&
585 50 * (tn->full_children + tnode_child_length(tn)
586 - tn->empty_children)
587 >= inflate_threshold_use * tnode_child_length(tn))) {
589 old_tn = tn;
590 tn = inflate(t, tn);
592 if (IS_ERR(tn)) {
593 tn = old_tn;
594 #ifdef CONFIG_IP_FIB_TRIE_STATS
595 t->stats.resize_node_skipped++;
596 #endif
597 break;
601 if (max_resize < 0) {
602 if (!tn->parent)
603 pr_warning("Fix inflate_threshold_root."
604 " Now=%d size=%d bits\n",
605 inflate_threshold_root, tn->bits);
606 else
607 pr_warning("Fix inflate_threshold."
608 " Now=%d size=%d bits\n",
609 inflate_threshold, tn->bits);
612 check_tnode(tn);
615 * Halve as long as the number of empty children in this
616 * node is above threshold.
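/*
 * Numeric illustration (added): with halve_threshold = 25 and a 16-slot
 * tnode that has only 3 non-empty children,
 *
 *   100 * (16 - 13) = 300  <  25 * 16 = 400
 *
 * so the node is halved, provided tn->bits > 1.
 */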
620 /* Keep root node larger */
622 if (!tn->parent)
623 halve_threshold_use = halve_threshold_root;
624 else
625 halve_threshold_use = halve_threshold;
627 err = 0;
628 max_resize = 10;
629 while (tn->bits > 1 && max_resize-- &&
630 100 * (tnode_child_length(tn) - tn->empty_children) <
631 halve_threshold_use * tnode_child_length(tn)) {
633 old_tn = tn;
634 tn = halve(t, tn);
635 if (IS_ERR(tn)) {
636 tn = old_tn;
637 #ifdef CONFIG_IP_FIB_TRIE_STATS
638 t->stats.resize_node_skipped++;
639 #endif
640 break;
644 if (max_resize < 0) {
645 if (!tn->parent)
646 pr_warning("Fix halve_threshold_root."
647 " Now=%d size=%d bits\n",
648 halve_threshold_root, tn->bits);
649 else
650 pr_warning("Fix halve_threshold."
651 " Now=%d size=%d bits\n",
652 halve_threshold, tn->bits);
655 /* Only one child remains */
656 if (tn->empty_children == tnode_child_length(tn) - 1)
657 for (i = 0; i < tnode_child_length(tn); i++) {
658 struct node *n;
660 n = tn->child[i];
661 if (!n)
662 continue;
664 /* compress one level */
666 node_set_parent(n, NULL);
667 tnode_free(tn);
668 return n;
671 return (struct node *) tn;
674 static struct tnode *inflate(struct trie *t, struct tnode *tn)
676 struct tnode *oldtnode = tn;
677 int olen = tnode_child_length(tn);
678 int i;
680 pr_debug("In inflate\n");
682 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
684 if (!tn)
685 return ERR_PTR(-ENOMEM);
688 * Preallocate and store tnodes before the actual work so we
689 * don't get into an inconsistent state if memory allocation
690 * fails. In case of failure we return the old node and the
691 * inflate operation is abandoned.
694 for (i = 0; i < olen; i++) {
695 struct tnode *inode;
697 inode = (struct tnode *) tnode_get_child(oldtnode, i);
698 if (inode &&
699 IS_TNODE(inode) &&
700 inode->pos == oldtnode->pos + oldtnode->bits &&
701 inode->bits > 1) {
702 struct tnode *left, *right;
703 t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
705 left = tnode_new(inode->key&(~m), inode->pos + 1,
706 inode->bits - 1);
707 if (!left)
708 goto nomem;
710 right = tnode_new(inode->key|m, inode->pos + 1,
711 inode->bits - 1);
713 if (!right) {
714 tnode_free(left);
715 goto nomem;
718 put_child(t, tn, 2*i, (struct node *) left);
719 put_child(t, tn, 2*i+1, (struct node *) right);
723 for (i = 0; i < olen; i++) {
724 struct tnode *inode;
725 struct node *node = tnode_get_child(oldtnode, i);
726 struct tnode *left, *right;
727 int size, j;
729 /* An empty child */
730 if (node == NULL)
731 continue;
733 /* A leaf or an internal node with skipped bits */
735 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
736 tn->pos + tn->bits - 1) {
737 if (tkey_extract_bits(node->key,
738 oldtnode->pos + oldtnode->bits,
739 1) == 0)
740 put_child(t, tn, 2*i, node);
741 else
742 put_child(t, tn, 2*i+1, node);
743 continue;
746 /* An internal node with two children */
747 inode = (struct tnode *) node;
749 if (inode->bits == 1) {
750 put_child(t, tn, 2*i, inode->child[0]);
751 put_child(t, tn, 2*i+1, inode->child[1]);
753 tnode_free(inode);
754 continue;
757 /* An internal node with more than two children */
759 /* We will replace this node 'inode' with two new
760 * ones, 'left' and 'right', each with half of the
761 * original children. The two new nodes will have
762 * a position one bit further down the key and this
763 * means that the "significant" part of their keys
764 * (see the discussion near the top of this file)
765 * will differ by one bit, which will be "0" in
766 * left's key and "1" in right's key. Since we are
767 * moving the key position by one step, the bit that
768 * we are moving away from - the bit at position
769 * (inode->pos) - is the one that will differ between
770 * left and right. So... we synthesize that bit in the
771 * two new keys.
772 * The mask 'm' below will be a single "one" bit at
773 * the position (inode->pos)
776 /* Use the old key, but set the new significant
777 * bit to zero.
780 left = (struct tnode *) tnode_get_child(tn, 2*i);
781 put_child(t, tn, 2*i, NULL);
783 BUG_ON(!left);
785 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
786 put_child(t, tn, 2*i+1, NULL);
788 BUG_ON(!right);
790 size = tnode_child_length(left);
791 for (j = 0; j < size; j++) {
792 put_child(t, left, j, inode->child[j]);
793 put_child(t, right, j, inode->child[j + size]);
795 put_child(t, tn, 2*i, resize(t, left));
796 put_child(t, tn, 2*i+1, resize(t, right));
798 tnode_free(inode);
800 tnode_free(oldtnode);
801 return tn;
802 nomem:
804 int size = tnode_child_length(tn);
805 int j;
807 for (j = 0; j < size; j++)
808 if (tn->child[j])
809 tnode_free((struct tnode *)tn->child[j]);
811 tnode_free(tn);
813 return ERR_PTR(-ENOMEM);
817 static struct tnode *halve(struct trie *t, struct tnode *tn)
819 struct tnode *oldtnode = tn;
820 struct node *left, *right;
821 int i;
822 int olen = tnode_child_length(tn);
824 pr_debug("In halve\n");
826 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
828 if (!tn)
829 return ERR_PTR(-ENOMEM);
832 * Preallocate and store tnodes before the actual work so we
833 * don't get into an inconsistent state if memory allocation
834 * fails. In case of failure we return the old node and the
835 * halve operation is abandoned.
838 for (i = 0; i < olen; i += 2) {
839 left = tnode_get_child(oldtnode, i);
840 right = tnode_get_child(oldtnode, i+1);
842 /* Two nonempty children */
843 if (left && right) {
844 struct tnode *newn;
846 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
848 if (!newn)
849 goto nomem;
851 put_child(t, tn, i/2, (struct node *)newn);
856 for (i = 0; i < olen; i += 2) {
857 struct tnode *newBinNode;
859 left = tnode_get_child(oldtnode, i);
860 right = tnode_get_child(oldtnode, i+1);
862 /* At least one of the children is empty */
863 if (left == NULL) {
864 if (right == NULL) /* Both are empty */
865 continue;
866 put_child(t, tn, i/2, right);
867 continue;
870 if (right == NULL) {
871 put_child(t, tn, i/2, left);
872 continue;
875 /* Two nonempty children */
876 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
877 put_child(t, tn, i/2, NULL);
878 put_child(t, newBinNode, 0, left);
879 put_child(t, newBinNode, 1, right);
880 put_child(t, tn, i/2, resize(t, newBinNode));
882 tnode_free(oldtnode);
883 return tn;
884 nomem:
886 int size = tnode_child_length(tn);
887 int j;
889 for (j = 0; j < size; j++)
890 if (tn->child[j])
891 tnode_free((struct tnode *)tn->child[j]);
893 tnode_free(tn);
895 return ERR_PTR(-ENOMEM);
899 /* The read side must hold rcu_read_lock; currently this is used by the
900 dump routines via get_fa_head and dump */
902 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
904 struct hlist_head *head = &l->list;
905 struct hlist_node *node;
906 struct leaf_info *li;
908 hlist_for_each_entry_rcu(li, node, head, hlist)
909 if (li->plen == plen)
910 return li;
912 return NULL;
915 static inline struct list_head *get_fa_head(struct leaf *l, int plen)
917 struct leaf_info *li = find_leaf_info(l, plen);
919 if (!li)
920 return NULL;
922 return &li->falh;
925 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
927 struct leaf_info *li = NULL, *last = NULL;
928 struct hlist_node *node;
930 if (hlist_empty(head)) {
931 hlist_add_head_rcu(&new->hlist, head);
932 } else {
933 hlist_for_each_entry(li, node, head, hlist) {
934 if (new->plen > li->plen)
935 break;
937 last = li;
939 if (last)
940 hlist_add_after_rcu(&last->hlist, &new->hlist);
941 else
942 hlist_add_before_rcu(&new->hlist, &li->hlist);
946 /* rcu_read_lock needs to be held by the caller on the read side */
948 static struct leaf *
949 fib_find_node(struct trie *t, u32 key)
951 int pos;
952 struct tnode *tn;
953 struct node *n;
955 pos = 0;
956 n = rcu_dereference(t->trie);
958 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
959 tn = (struct tnode *) n;
961 check_tnode(tn);
963 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
964 pos = tn->pos + tn->bits;
965 n = tnode_get_child_rcu(tn,
966 tkey_extract_bits(key,
967 tn->pos,
968 tn->bits));
969 } else
970 break;
972 /* Case we have found a leaf. Compare prefixes */
974 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
975 return (struct leaf *)n;
977 return NULL;
980 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
982 int wasfull;
983 t_key cindex, key = tn->key;
984 struct tnode *tp;
986 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
987 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
988 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
989 tn = (struct tnode *) resize(t, (struct tnode *)tn);
991 tnode_put_child_reorg((struct tnode *)tp, cindex,
992 (struct node *)tn, wasfull);
994 tp = node_parent((struct node *) tn);
995 if (!tp)
996 break;
997 tn = tp;
1000 /* Handle last (top) tnode */
1001 if (IS_TNODE(tn))
1002 tn = (struct tnode *)resize(t, (struct tnode *)tn);
1004 return (struct node *)tn;
1007 /* only used from updater-side */
1009 static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1011 int pos, newpos;
1012 struct tnode *tp = NULL, *tn = NULL;
1013 struct node *n;
1014 struct leaf *l;
1015 int missbit;
1016 struct list_head *fa_head = NULL;
1017 struct leaf_info *li;
1018 t_key cindex;
1020 pos = 0;
1021 n = t->trie;
1023 /* If we point to NULL, stop. Either the tree is empty and we should
1024 * just put a new leaf in it, or we have reached an empty child slot,
1025 * and we should just put our new leaf in that.
1026 * If we point to a T_TNODE, check if it matches our key. Note that
1027 * a T_TNODE might be skipping any number of bits - its 'pos' need
1028 * not be the parent's 'pos'+'bits'!
1030 * If it does match the current key, get pos/bits from it, extract
1031 * the index from our key, push the T_TNODE and walk the tree.
1033 * If it doesn't, we have to replace it with a new T_TNODE.
1035 * If we point to a T_LEAF, it might or might not have the same key
1036 * as we do. If it does, just change the value, update the T_LEAF's
1037 * value, and return it.
1038 * If it doesn't, we need to replace it with a T_TNODE.
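/*
 * Example of the replacement case (added for illustration): if the trie
 * consists of a single leaf for 10.0.0.0 (0x0a000000) and we insert
 * 10.128.0.0 (0x0a800000), tkey_mismatch() reports bit 8 as the first
 * differing bit, so a 1-bit tnode is created at pos 8 with the old leaf
 * in child[0] and the new leaf in child[1] (missbit == 1).
 */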
1041 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1042 tn = (struct tnode *) n;
1044 check_tnode(tn);
1046 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1047 tp = tn;
1048 pos = tn->pos + tn->bits;
1049 n = tnode_get_child(tn,
1050 tkey_extract_bits(key,
1051 tn->pos,
1052 tn->bits));
1054 BUG_ON(n && node_parent(n) != tn);
1055 } else
1056 break;
1060 * n ----> NULL, LEAF or TNODE
1062 * tp is n's (parent) ----> NULL or TNODE
1065 BUG_ON(tp && IS_LEAF(tp));
1067 /* Case 1: n is a leaf. Compare prefixes */
1069 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1070 l = (struct leaf *) n;
1071 li = leaf_info_new(plen);
1073 if (!li)
1074 return NULL;
1076 fa_head = &li->falh;
1077 insert_leaf_info(&l->list, li);
1078 goto done;
1080 l = leaf_new();
1082 if (!l)
1083 return NULL;
1085 l->key = key;
1086 li = leaf_info_new(plen);
1088 if (!li) {
1089 tnode_free((struct tnode *) l);
1090 return NULL;
1093 fa_head = &li->falh;
1094 insert_leaf_info(&l->list, li);
1096 if (t->trie && n == NULL) {
1097 /* Case 2: n is NULL, and we will just insert a new leaf */
1099 node_set_parent((struct node *)l, tp);
1101 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1102 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1103 } else {
1104 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1106 * Add a new tnode here;
1107 * the first tnode needs some special handling
1110 if (tp)
1111 pos = tp->pos+tp->bits;
1112 else
1113 pos = 0;
1115 if (n) {
1116 newpos = tkey_mismatch(key, pos, n->key);
1117 tn = tnode_new(n->key, newpos, 1);
1118 } else {
1119 newpos = 0;
1120 tn = tnode_new(key, newpos, 1); /* First tnode */
1123 if (!tn) {
1124 free_leaf_info(li);
1125 tnode_free((struct tnode *) l);
1126 return NULL;
1129 node_set_parent((struct node *)tn, tp);
1131 missbit = tkey_extract_bits(key, newpos, 1);
1132 put_child(t, tn, missbit, (struct node *)l);
1133 put_child(t, tn, 1-missbit, n);
1135 if (tp) {
1136 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1137 put_child(t, (struct tnode *)tp, cindex,
1138 (struct node *)tn);
1139 } else {
1140 rcu_assign_pointer(t->trie, (struct node *)tn);
1141 tp = tn;
1145 if (tp && tp->pos + tp->bits > 32)
1146 pr_warning("fib_trie"
1147 " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1148 tp, tp->pos, tp->bits, key, plen);
1150 /* Rebalance the trie */
1152 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1153 done:
1154 return fa_head;
1158 * Caller must hold RTNL.
1160 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1162 struct trie *t = (struct trie *) tb->tb_data;
1163 struct fib_alias *fa, *new_fa;
1164 struct list_head *fa_head = NULL;
1165 struct fib_info *fi;
1166 int plen = cfg->fc_dst_len;
1167 u8 tos = cfg->fc_tos;
1168 u32 key, mask;
1169 int err;
1170 struct leaf *l;
1172 if (plen > 32)
1173 return -EINVAL;
1175 key = ntohl(cfg->fc_dst);
1177 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1179 mask = ntohl(inet_make_mask(plen));
1181 if (key & ~mask)
1182 return -EINVAL;
1184 key = key & mask;
1186 fi = fib_create_info(cfg);
1187 if (IS_ERR(fi)) {
1188 err = PTR_ERR(fi);
1189 goto err;
1192 l = fib_find_node(t, key);
1193 fa = NULL;
1195 if (l) {
1196 fa_head = get_fa_head(l, plen);
1197 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1200 /* Now fa, if non-NULL, points to the first fib alias
1201 * with the same keys [prefix,tos,priority], if such key already
1202 * exists or to the node before which we will insert new one.
1204 * If fa is NULL, we will need to allocate a new one and
1205 * insert to the head of f.
1207 * If f is NULL, no fib node matched the destination key
1208 * and we need to allocate a new one of those as well.
1211 if (fa && fa->fa_tos == tos &&
1212 fa->fa_info->fib_priority == fi->fib_priority) {
1213 struct fib_alias *fa_first, *fa_match;
1215 err = -EEXIST;
1216 if (cfg->fc_nlflags & NLM_F_EXCL)
1217 goto out;
1219 /* We have 2 goals:
1220 * 1. Find exact match for type, scope, fib_info to avoid
1221 * duplicate routes
1222 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1224 fa_match = NULL;
1225 fa_first = fa;
1226 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1227 list_for_each_entry_continue(fa, fa_head, fa_list) {
1228 if (fa->fa_tos != tos)
1229 break;
1230 if (fa->fa_info->fib_priority != fi->fib_priority)
1231 break;
1232 if (fa->fa_type == cfg->fc_type &&
1233 fa->fa_scope == cfg->fc_scope &&
1234 fa->fa_info == fi) {
1235 fa_match = fa;
1236 break;
1240 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1241 struct fib_info *fi_drop;
1242 u8 state;
1244 fa = fa_first;
1245 if (fa_match) {
1246 if (fa == fa_match)
1247 err = 0;
1248 goto out;
1250 err = -ENOBUFS;
1251 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1252 if (new_fa == NULL)
1253 goto out;
1255 fi_drop = fa->fa_info;
1256 new_fa->fa_tos = fa->fa_tos;
1257 new_fa->fa_info = fi;
1258 new_fa->fa_type = cfg->fc_type;
1259 new_fa->fa_scope = cfg->fc_scope;
1260 state = fa->fa_state;
1261 new_fa->fa_state = state & ~FA_S_ACCESSED;
1263 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1264 alias_free_mem_rcu(fa);
1266 fib_release_info(fi_drop);
1267 if (state & FA_S_ACCESSED)
1268 rt_cache_flush(-1);
1269 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1270 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1272 goto succeeded;
1274 /* Error if we find a perfect match which
1275 * uses the same scope, type, and nexthop
1276 * information.
1278 if (fa_match)
1279 goto out;
1281 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1282 fa = fa_first;
1284 err = -ENOENT;
1285 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1286 goto out;
1288 err = -ENOBUFS;
1289 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1290 if (new_fa == NULL)
1291 goto out;
1293 new_fa->fa_info = fi;
1294 new_fa->fa_tos = tos;
1295 new_fa->fa_type = cfg->fc_type;
1296 new_fa->fa_scope = cfg->fc_scope;
1297 new_fa->fa_state = 0;
1299 * Insert new entry to the list.
1302 if (!fa_head) {
1303 fa_head = fib_insert_node(t, key, plen);
1304 if (unlikely(!fa_head)) {
1305 err = -ENOMEM;
1306 goto out_free_new_fa;
1310 list_add_tail_rcu(&new_fa->fa_list,
1311 (fa ? &fa->fa_list : fa_head));
1313 rt_cache_flush(-1);
1314 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1315 &cfg->fc_nlinfo, 0);
1316 succeeded:
1317 return 0;
1319 out_free_new_fa:
1320 kmem_cache_free(fn_alias_kmem, new_fa);
1321 out:
1322 fib_release_info(fi);
1323 err:
1324 return err;
1327 /* should be called with rcu_read_lock */
1328 static int check_leaf(struct trie *t, struct leaf *l,
1329 t_key key, const struct flowi *flp,
1330 struct fib_result *res)
1332 struct leaf_info *li;
1333 struct hlist_head *hhead = &l->list;
1334 struct hlist_node *node;
1336 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1337 int err;
1338 int plen = li->plen;
1339 __be32 mask = inet_make_mask(plen);
1341 if (l->key != (key & ntohl(mask)))
1342 continue;
1344 err = fib_semantic_match(&li->falh, flp, res,
1345 htonl(l->key), mask, plen);
1347 #ifdef CONFIG_IP_FIB_TRIE_STATS
1348 if (err <= 0)
1349 t->stats.semantic_match_passed++;
1350 else
1351 t->stats.semantic_match_miss++;
1352 #endif
1353 if (err <= 0)
1354 return plen;
1357 return -1;
1360 static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,
1361 struct fib_result *res)
1363 struct trie *t = (struct trie *) tb->tb_data;
1364 int plen, ret = 0;
1365 struct node *n;
1366 struct tnode *pn;
1367 int pos, bits;
1368 t_key key = ntohl(flp->fl4_dst);
1369 int chopped_off;
1370 t_key cindex = 0;
1371 int current_prefix_length = KEYLENGTH;
1372 struct tnode *cn;
1373 t_key node_prefix, key_prefix, pref_mismatch;
1374 int mp;
1376 rcu_read_lock();
1378 n = rcu_dereference(t->trie);
1379 if (!n)
1380 goto failed;
1382 #ifdef CONFIG_IP_FIB_TRIE_STATS
1383 t->stats.gets++;
1384 #endif
1386 /* Just a leaf? */
1387 if (IS_LEAF(n)) {
1388 plen = check_leaf(t, (struct leaf *)n, key, flp, res);
1389 if (plen < 0)
1390 goto failed;
1391 ret = 0;
1392 goto found;
1395 pn = (struct tnode *) n;
1396 chopped_off = 0;
1398 while (pn) {
1399 pos = pn->pos;
1400 bits = pn->bits;
1402 if (!chopped_off)
1403 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1404 pos, bits);
1406 n = tnode_get_child(pn, cindex);
1408 if (n == NULL) {
1409 #ifdef CONFIG_IP_FIB_TRIE_STATS
1410 t->stats.null_node_hit++;
1411 #endif
1412 goto backtrace;
1415 if (IS_LEAF(n)) {
1416 plen = check_leaf(t, (struct leaf *)n, key, flp, res);
1417 if (plen < 0)
1418 goto backtrace;
1420 ret = 0;
1421 goto found;
1424 cn = (struct tnode *)n;
1427 * It's a tnode, and we can do some extra checks here if we
1428 * like, to avoid descending into a dead-end branch.
1429 * This tnode is in the parent's child array at index
1430 * key[p_pos..p_pos+p_bits] but potentially with some bits
1431 * chopped off, so in reality the index may be just a
1432 * subprefix, padded with zero at the end.
1433 * We can also take a look at any skipped bits in this
1434 * tnode - everything up to p_pos is supposed to be ok,
1435 * and the non-chopped bits of the index (see previous
1436 * paragraph) are also guaranteed ok, but the rest is
1437 * considered unknown.
1439 * The skipped bits are key[pos+bits..cn->pos].
1442 /* If current_prefix_length < pos+bits, we are already doing
1443 * actual prefix matching, which means everything from
1444 * pos+(bits-chopped_off) onward must be zero along some
1445 * branch of this subtree - otherwise there is *no* valid
1446 * prefix present. Here we can only check the skipped
1447 * bits. Remember, since we have already indexed into the
1448 * parent's child array, we know that the bits we chopped off
1449 * *are* zero.
1452 /* NOTA BENE: Checking only skipped bits
1453 for the new node here */
1455 if (current_prefix_length < pos+bits) {
1456 if (tkey_extract_bits(cn->key, current_prefix_length,
1457 cn->pos - current_prefix_length)
1458 || !(cn->child[0]))
1459 goto backtrace;
1463 * If chopped_off=0, the index is fully validated and we
1464 * only need to look at the skipped bits for this, the new,
1465 * tnode. What we actually want to do is to find out if
1466 * these skipped bits match our key perfectly, or if we will
1467 * have to count on finding a matching prefix further down,
1468 * because if we do, we would like to have some way of
1469 * verifying the existence of such a prefix at this point.
1472 /* The only thing we can do at this point is to verify that
1473 * any such matching prefix can indeed be a prefix to our
1474 * key, and if the bits in the node we are inspecting that
1475 * do not match our key are not ZERO, this cannot be true.
1476 * Thus, find out where there is a mismatch (before cn->pos)
1477 * and verify that all the mismatching bits are zero in the
1478 * new tnode's key.
1482 * Note: We aren't very concerned about the piece of
1483 * the key that precedes pn->pos+pn->bits, since these
1484 * have already been checked. The bits after cn->pos
1485 * aren't checked since these are by definition
1486 * "unknown" at this point. Thus, what we want to see
1487 * is if we are about to enter the "prefix matching"
1488 * state, and in that case verify that the skipped
1489 * bits that will prevail throughout this subtree are
1490 * zero, as they have to be if we are to find a
1491 * matching prefix.
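/*
 * Worked example (added for illustration): say cn->pos = 20, cn->key =
 * 0x0a000000 (10.0.0.0) and the search key is 0x0a020304 (10.2.3.4).
 * Then node_prefix = 0x0a000000, key_prefix = 0x0a020000 and
 * pref_mismatch = 0x00020000, giving mp = 14.  Bits 14..19 of cn->key
 * are all zero, so we do not backtrack, but current_prefix_length is
 * lowered to 14: only prefixes of 14 bits or less can still match the
 * search key inside this subtree.
 */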
1494 node_prefix = mask_pfx(cn->key, cn->pos);
1495 key_prefix = mask_pfx(key, cn->pos);
1496 pref_mismatch = key_prefix^node_prefix;
1497 mp = 0;
1500 * In short: If skipped bits in this node do not match
1501 * the search key, enter the "prefix matching"
1502 * state directly.
1504 if (pref_mismatch) {
1505 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1506 mp++;
1507 pref_mismatch = pref_mismatch << 1;
1509 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1511 if (key_prefix != 0)
1512 goto backtrace;
1514 if (current_prefix_length >= cn->pos)
1515 current_prefix_length = mp;
1518 pn = (struct tnode *)n; /* Descend */
1519 chopped_off = 0;
1520 continue;
1522 backtrace:
1523 chopped_off++;
1525 /* Chopping off bits that are already zero doesn't change cindex, so skip them */
1526 while ((chopped_off <= pn->bits)
1527 && !(cindex & (1<<(chopped_off-1))))
1528 chopped_off++;
1530 /* Decrease current_... with bits chopped off */
1531 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1532 current_prefix_length = pn->pos + pn->bits
1533 - chopped_off;
1536 * Either we do the actual chop-off here or, if we have
1537 * chopped off all bits in this tnode, walk up to our parent.
1540 if (chopped_off <= pn->bits) {
1541 cindex &= ~(1 << (chopped_off-1));
1542 } else {
1543 struct tnode *parent = node_parent((struct node *) pn);
1544 if (!parent)
1545 goto failed;
1547 /* Get Child's index */
1548 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1549 pn = parent;
1550 chopped_off = 0;
1552 #ifdef CONFIG_IP_FIB_TRIE_STATS
1553 t->stats.backtrack++;
1554 #endif
1555 goto backtrace;
1558 failed:
1559 ret = 1;
1560 found:
1561 rcu_read_unlock();
1562 return ret;
1566 * Remove the leaf and return parent.
1568 static void trie_leaf_remove(struct trie *t, struct leaf *l)
1570 struct tnode *tp = node_parent((struct node *) l);
1572 pr_debug("entering trie_leaf_remove(%p)\n", l);
1574 if (tp) {
1575 t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
1576 put_child(t, (struct tnode *)tp, cindex, NULL);
1577 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1578 } else
1579 rcu_assign_pointer(t->trie, NULL);
1581 tnode_free((struct tnode *) l);
1585 * Caller must hold RTNL.
1587 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1589 struct trie *t = (struct trie *) tb->tb_data;
1590 u32 key, mask;
1591 int plen = cfg->fc_dst_len;
1592 u8 tos = cfg->fc_tos;
1593 struct fib_alias *fa, *fa_to_delete;
1594 struct list_head *fa_head;
1595 struct leaf *l;
1596 struct leaf_info *li;
1598 if (plen > 32)
1599 return -EINVAL;
1601 key = ntohl(cfg->fc_dst);
1602 mask = ntohl(inet_make_mask(plen));
1604 if (key & ~mask)
1605 return -EINVAL;
1607 key = key & mask;
1608 l = fib_find_node(t, key);
1610 if (!l)
1611 return -ESRCH;
1613 fa_head = get_fa_head(l, plen);
1614 fa = fib_find_alias(fa_head, tos, 0);
1616 if (!fa)
1617 return -ESRCH;
1619 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1621 fa_to_delete = NULL;
1622 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1623 list_for_each_entry_continue(fa, fa_head, fa_list) {
1624 struct fib_info *fi = fa->fa_info;
1626 if (fa->fa_tos != tos)
1627 break;
1629 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1630 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1631 fa->fa_scope == cfg->fc_scope) &&
1632 (!cfg->fc_protocol ||
1633 fi->fib_protocol == cfg->fc_protocol) &&
1634 fib_nh_match(cfg, fi) == 0) {
1635 fa_to_delete = fa;
1636 break;
1640 if (!fa_to_delete)
1641 return -ESRCH;
1643 fa = fa_to_delete;
1644 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1645 &cfg->fc_nlinfo, 0);
1647 l = fib_find_node(t, key);
1648 li = find_leaf_info(l, plen);
1650 list_del_rcu(&fa->fa_list);
1652 if (list_empty(fa_head)) {
1653 hlist_del_rcu(&li->hlist);
1654 free_leaf_info(li);
1657 if (hlist_empty(&l->list))
1658 trie_leaf_remove(t, l);
1660 if (fa->fa_state & FA_S_ACCESSED)
1661 rt_cache_flush(-1);
1663 fib_release_info(fa->fa_info);
1664 alias_free_mem_rcu(fa);
1665 return 0;
1668 static int trie_flush_list(struct trie *t, struct list_head *head)
1670 struct fib_alias *fa, *fa_node;
1671 int found = 0;
1673 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1674 struct fib_info *fi = fa->fa_info;
1676 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1677 list_del_rcu(&fa->fa_list);
1678 fib_release_info(fa->fa_info);
1679 alias_free_mem_rcu(fa);
1680 found++;
1683 return found;
1686 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1688 int found = 0;
1689 struct hlist_head *lih = &l->list;
1690 struct hlist_node *node, *tmp;
1691 struct leaf_info *li = NULL;
1693 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1694 found += trie_flush_list(t, &li->falh);
1696 if (list_empty(&li->falh)) {
1697 hlist_del_rcu(&li->hlist);
1698 free_leaf_info(li);
1701 return found;
1705 * Scan for the next right leaf starting at node p->child[idx]
1706 * Since we have a back pointer, no recursion is necessary.
1708 static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
1710 do {
1711 t_key idx;
1713 if (c)
1714 idx = tkey_extract_bits(c->key, p->pos, p->bits) + 1;
1715 else
1716 idx = 0;
1718 while (idx < 1u << p->bits) {
1719 c = tnode_get_child_rcu(p, idx++);
1720 if (!c)
1721 continue;
1723 if (IS_LEAF(c)) {
1724 prefetch(p->child[idx]);
1725 return (struct leaf *) c;
1728 /* Restart scanning in the new node */
1729 p = (struct tnode *) c;
1730 idx = 0;
1733 /* Node empty, walk back up to parent */
1734 c = (struct node *) p;
1735 } while ( (p = node_parent_rcu(c)) != NULL);
1737 return NULL; /* Root of trie */
1740 static struct leaf *trie_firstleaf(struct trie *t)
1742 struct tnode *n = (struct tnode *) rcu_dereference(t->trie);
1744 if (!n)
1745 return NULL;
1747 if (IS_LEAF(n)) /* trie is just a leaf */
1748 return (struct leaf *) n;
1750 return leaf_walk_rcu(n, NULL);
1753 static struct leaf *trie_nextleaf(struct leaf *l)
1755 struct node *c = (struct node *) l;
1756 struct tnode *p = node_parent(c);
1758 if (!p)
1759 return NULL; /* trie with just one leaf */
1761 return leaf_walk_rcu(p, c);
1764 static struct leaf *trie_leafindex(struct trie *t, int index)
1766 struct leaf *l = trie_firstleaf(t);
1768 while (l && index-- > 0)
1769 l = trie_nextleaf(l);
1771 return l;
1776 * Caller must hold RTNL.
1778 static int fn_trie_flush(struct fib_table *tb)
1780 struct trie *t = (struct trie *) tb->tb_data;
1781 struct leaf *l, *ll = NULL;
1782 int found = 0;
1784 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
1785 found += trie_flush_leaf(t, l);
1787 if (ll && hlist_empty(&ll->list))
1788 trie_leaf_remove(t, ll);
1789 ll = l;
1792 if (ll && hlist_empty(&ll->list))
1793 trie_leaf_remove(t, ll);
1795 pr_debug("trie_flush found=%d\n", found);
1796 return found;
1799 static void fn_trie_select_default(struct fib_table *tb,
1800 const struct flowi *flp,
1801 struct fib_result *res)
1803 struct trie *t = (struct trie *) tb->tb_data;
1804 int order, last_idx;
1805 struct fib_info *fi = NULL;
1806 struct fib_info *last_resort;
1807 struct fib_alias *fa = NULL;
1808 struct list_head *fa_head;
1809 struct leaf *l;
1811 last_idx = -1;
1812 last_resort = NULL;
1813 order = -1;
1815 rcu_read_lock();
1817 l = fib_find_node(t, 0);
1818 if (!l)
1819 goto out;
1821 fa_head = get_fa_head(l, 0);
1822 if (!fa_head)
1823 goto out;
1825 if (list_empty(fa_head))
1826 goto out;
1828 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1829 struct fib_info *next_fi = fa->fa_info;
1831 if (fa->fa_scope != res->scope ||
1832 fa->fa_type != RTN_UNICAST)
1833 continue;
1835 if (next_fi->fib_priority > res->fi->fib_priority)
1836 break;
1837 if (!next_fi->fib_nh[0].nh_gw ||
1838 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1839 continue;
1840 fa->fa_state |= FA_S_ACCESSED;
1842 if (fi == NULL) {
1843 if (next_fi != res->fi)
1844 break;
1845 } else if (!fib_detect_death(fi, order, &last_resort,
1846 &last_idx, tb->tb_default)) {
1847 fib_result_assign(res, fi);
1848 tb->tb_default = order;
1849 goto out;
1851 fi = next_fi;
1852 order++;
1854 if (order <= 0 || fi == NULL) {
1855 tb->tb_default = -1;
1856 goto out;
1859 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1860 tb->tb_default)) {
1861 fib_result_assign(res, fi);
1862 tb->tb_default = order;
1863 goto out;
1865 if (last_idx >= 0)
1866 fib_result_assign(res, last_resort);
1867 tb->tb_default = last_idx;
1868 out:
1869 rcu_read_unlock();
1872 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1873 struct fib_table *tb,
1874 struct sk_buff *skb, struct netlink_callback *cb)
1876 int i, s_i;
1877 struct fib_alias *fa;
1878 __be32 xkey = htonl(key);
1880 s_i = cb->args[5];
1881 i = 0;
1883 /* rcu_read_lock is held by the caller */
1885 list_for_each_entry_rcu(fa, fah, fa_list) {
1886 if (i < s_i) {
1887 i++;
1888 continue;
1891 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1892 cb->nlh->nlmsg_seq,
1893 RTM_NEWROUTE,
1894 tb->tb_id,
1895 fa->fa_type,
1896 fa->fa_scope,
1897 xkey,
1898 plen,
1899 fa->fa_tos,
1900 fa->fa_info, NLM_F_MULTI) < 0) {
1901 cb->args[5] = i;
1902 return -1;
1904 i++;
1906 cb->args[5] = i;
1907 return skb->len;
1910 static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
1911 struct sk_buff *skb, struct netlink_callback *cb)
1913 struct leaf_info *li;
1914 struct hlist_node *node;
1915 int i, s_i;
1917 s_i = cb->args[4];
1918 i = 0;
1920 /* rcu_read_lock is held by the caller */
1921 hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
1922 if (i < s_i) {
1923 i++;
1924 continue;
1927 if (i > s_i)
1928 cb->args[5] = 0;
1930 if (list_empty(&li->falh))
1931 continue;
1933 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
1934 cb->args[4] = i;
1935 return -1;
1937 i++;
1940 cb->args[4] = i;
1941 return skb->len;
1944 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb,
1945 struct netlink_callback *cb)
1947 struct leaf *l;
1948 struct trie *t = (struct trie *) tb->tb_data;
1949 t_key key = cb->args[2];
1950 int count = cb->args[3];
1952 rcu_read_lock();
1953 /* Dump starting at last key.
1954 * Note: 0.0.0.0/0 (ie default) is first key.
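/*
 * (Added note) Netlink callback state used by the dump code: cb->args[2]
 * holds the key of the last leaf dumped, cb->args[3] the number of
 * leaves dumped so far, and cb->args[4]/cb->args[5] the per-leaf
 * leaf_info and fib_alias positions used by fn_trie_dump_leaf() and
 * fn_trie_dump_fa().
 */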
1956 if (count == 0)
1957 l = trie_firstleaf(t);
1958 else {
1959 /* Normally, continue from last key, but if that is missing
1960 * fall back to using a slow rescan
1962 l = fib_find_node(t, key);
1963 if (!l)
1964 l = trie_leafindex(t, count);
1967 while (l) {
1968 cb->args[2] = l->key;
1969 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
1970 cb->args[3] = count;
1971 rcu_read_unlock();
1972 return -1;
1975 ++count;
1976 l = trie_nextleaf(l);
1977 memset(&cb->args[4], 0,
1978 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1980 cb->args[3] = count;
1981 rcu_read_unlock();
1983 return skb->len;
1986 void __init fib_hash_init(void)
1988 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1989 sizeof(struct fib_alias),
1990 0, SLAB_PANIC, NULL);
1992 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
1993 max(sizeof(struct leaf),
1994 sizeof(struct leaf_info)),
1995 0, SLAB_PANIC, NULL);
1999 /* Fix more generic FIB names for init later */
2000 struct fib_table *fib_hash_table(u32 id)
2002 struct fib_table *tb;
2003 struct trie *t;
2005 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
2006 GFP_KERNEL);
2007 if (tb == NULL)
2008 return NULL;
2010 tb->tb_id = id;
2011 tb->tb_default = -1;
2012 tb->tb_lookup = fn_trie_lookup;
2013 tb->tb_insert = fn_trie_insert;
2014 tb->tb_delete = fn_trie_delete;
2015 tb->tb_flush = fn_trie_flush;
2016 tb->tb_select_default = fn_trie_select_default;
2017 tb->tb_dump = fn_trie_dump;
2019 t = (struct trie *) tb->tb_data;
2020 memset(t, 0, sizeof(*t));
2022 if (id == RT_TABLE_LOCAL)
2023 pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION);
2025 return tb;
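/*
 * Usage sketch (added, not part of the original file): callers obtain a
 * table via fib_get_table()/fib_new_table() and dispatch through the
 * ops wired up above, e.g. fib_lookup() ends up calling
 *
 *   tb->tb_lookup(tb, flp, res);    which lands in fn_trie_lookup()
 */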
2028 #ifdef CONFIG_PROC_FS
2029 /* Depth first Trie walk iterator */
2030 struct fib_trie_iter {
2031 struct seq_net_private p;
2032 struct trie *trie_local, *trie_main;
2033 struct tnode *tnode;
2034 struct trie *trie;
2035 unsigned index;
2036 unsigned depth;
2039 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
2041 struct tnode *tn = iter->tnode;
2042 unsigned cindex = iter->index;
2043 struct tnode *p;
2045 /* A single entry routing table */
2046 if (!tn)
2047 return NULL;
2049 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2050 iter->tnode, iter->index, iter->depth);
2051 rescan:
2052 while (cindex < (1<<tn->bits)) {
2053 struct node *n = tnode_get_child_rcu(tn, cindex);
2055 if (n) {
2056 if (IS_LEAF(n)) {
2057 iter->tnode = tn;
2058 iter->index = cindex + 1;
2059 } else {
2060 /* push down one level */
2061 iter->tnode = (struct tnode *) n;
2062 iter->index = 0;
2063 ++iter->depth;
2065 return n;
2068 ++cindex;
2071 /* Current node exhausted, pop back up */
2072 p = node_parent_rcu((struct node *)tn);
2073 if (p) {
2074 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2075 tn = p;
2076 --iter->depth;
2077 goto rescan;
2080 /* got root? */
2081 return NULL;
2084 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2085 struct trie *t)
2087 struct node *n;
2089 if (!t)
2090 return NULL;
2092 n = rcu_dereference(t->trie);
2094 if (!iter)
2095 return NULL;
2097 if (n) {
2098 if (IS_TNODE(n)) {
2099 iter->tnode = (struct tnode *) n;
2100 iter->trie = t;
2101 iter->index = 0;
2102 iter->depth = 1;
2103 } else {
2104 iter->tnode = NULL;
2105 iter->trie = t;
2106 iter->index = 0;
2107 iter->depth = 0;
2109 return n;
2111 return NULL;
2114 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2116 struct node *n;
2117 struct fib_trie_iter iter;
2119 memset(s, 0, sizeof(*s));
2121 rcu_read_lock();
2122 for (n = fib_trie_get_first(&iter, t); n;
2123 n = fib_trie_get_next(&iter)) {
2124 if (IS_LEAF(n)) {
2125 struct leaf *l = (struct leaf *)n;
2126 struct leaf_info *li;
2127 struct hlist_node *tmp;
2129 s->leaves++;
2130 s->totdepth += iter.depth;
2131 if (iter.depth > s->maxdepth)
2132 s->maxdepth = iter.depth;
2134 hlist_for_each_entry_rcu(li, tmp, &l->list, hlist)
2135 ++s->prefixes;
2136 } else {
2137 const struct tnode *tn = (const struct tnode *) n;
2138 int i;
2140 s->tnodes++;
2141 if (tn->bits < MAX_STAT_DEPTH)
2142 s->nodesizes[tn->bits]++;
2144 for (i = 0; i < (1<<tn->bits); i++)
2145 if (!tn->child[i])
2146 s->nullpointers++;
2149 rcu_read_unlock();
2153 * This outputs /proc/net/fib_triestats
2155 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2157 unsigned i, max, pointers, bytes, avdepth;
2159 if (stat->leaves)
2160 avdepth = stat->totdepth*100 / stat->leaves;
2161 else
2162 avdepth = 0;
2164 seq_printf(seq, "\tAver depth: %u.%02d\n",
2165 avdepth / 100, avdepth % 100);
2166 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2168 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2169 bytes = sizeof(struct leaf) * stat->leaves;
2171 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
2172 bytes += sizeof(struct leaf_info) * stat->prefixes;
2174 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
2175 bytes += sizeof(struct tnode) * stat->tnodes;
2177 max = MAX_STAT_DEPTH;
2178 while (max > 0 && stat->nodesizes[max-1] == 0)
2179 max--;
2181 pointers = 0;
2182 for (i = 1; i <= max; i++)
2183 if (stat->nodesizes[i] != 0) {
2184 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
2185 pointers += (1<<i) * stat->nodesizes[i];
2187 seq_putc(seq, '\n');
2188 seq_printf(seq, "\tPointers: %u\n", pointers);
2190 bytes += sizeof(struct node *) * pointers;
2191 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
2192 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
2195 #ifdef CONFIG_IP_FIB_TRIE_STATS
2196 static void trie_show_usage(struct seq_file *seq,
2197 const struct trie_use_stats *stats)
2199 seq_printf(seq, "\nCounters:\n---------\n");
2200 seq_printf(seq, "gets = %u\n", stats->gets);
2201 seq_printf(seq, "backtracks = %u\n", stats->backtrack);
2202 seq_printf(seq, "semantic match passed = %u\n",
2203 stats->semantic_match_passed);
2204 seq_printf(seq, "semantic match miss = %u\n",
2205 stats->semantic_match_miss);
2206 seq_printf(seq, "null node hit= %u\n", stats->null_node_hit);
2207 seq_printf(seq, "skipped node resize = %u\n\n",
2208 stats->resize_node_skipped);
2210 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2212 static void fib_trie_show(struct seq_file *seq, const char *name,
2213 struct trie *trie)
2215 struct trie_stat stat;
2217 trie_collect_stats(trie, &stat);
2218 seq_printf(seq, "%s:\n", name);
2219 trie_show_stats(seq, &stat);
2220 #ifdef CONFIG_IP_FIB_TRIE_STATS
2221 trie_show_usage(seq, &trie->stats);
2222 #endif
2225 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2227 struct net *net = (struct net *)seq->private;
2228 struct fib_table *tb;
2230 seq_printf(seq,
2231 "Basic info: size of leaf:"
2232 " %Zd bytes, size of tnode: %Zd bytes.\n",
2233 sizeof(struct leaf), sizeof(struct tnode));
2235 tb = fib_get_table(net, RT_TABLE_LOCAL);
2236 if (tb)
2237 fib_trie_show(seq, "Local", (struct trie *) tb->tb_data);
2239 tb = fib_get_table(net, RT_TABLE_MAIN);
2240 if (tb)
2241 fib_trie_show(seq, "Main", (struct trie *) tb->tb_data);
2243 return 0;
2246 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2248 int err;
2249 struct net *net;
2251 net = get_proc_net(inode);
2252 if (net == NULL)
2253 return -ENXIO;
2254 err = single_open(file, fib_triestat_seq_show, net);
2255 if (err < 0) {
2256 put_net(net);
2257 return err;
2259 return 0;
2262 static int fib_triestat_seq_release(struct inode *ino, struct file *f)
2264 struct seq_file *seq = f->private_data;
2265 put_net(seq->private);
2266 return single_release(ino, f);
2269 static const struct file_operations fib_triestat_fops = {
2270 .owner = THIS_MODULE,
2271 .open = fib_triestat_seq_open,
2272 .read = seq_read,
2273 .llseek = seq_lseek,
2274 .release = fib_triestat_seq_release,
static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
				     loff_t pos)
{
	loff_t idx = 0;
	struct node *n;

	for (n = fib_trie_get_first(iter, iter->trie_local);
	     n; ++idx, n = fib_trie_get_next(iter)) {
		if (pos == idx)
			return n;
	}

	for (n = fib_trie_get_first(iter, iter->trie_main);
	     n; ++idx, n = fib_trie_get_next(iter)) {
		if (pos == idx)
			return n;
	}

	return NULL;
}

static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct fib_trie_iter *iter = seq->private;
	struct fib_table *tb;

	if (!iter->trie_local) {
		tb = fib_get_table(iter->p.net, RT_TABLE_LOCAL);
		if (tb)
			iter->trie_local = (struct trie *) tb->tb_data;
	}
	if (!iter->trie_main) {
		tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
		if (tb)
			iter->trie_main = (struct trie *) tb->tb_data;
	}
	rcu_read_lock();
	if (*pos == 0)
		return SEQ_START_TOKEN;
	return fib_trie_get_idx(iter, *pos - 1);
}

static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_trie_iter *iter = seq->private;
	void *l = v;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return fib_trie_get_idx(iter, 0);

	v = fib_trie_get_next(iter);
	BUG_ON(v == l);
	if (v)
		return v;

	/* continue scan in next trie */
	if (iter->trie == iter->trie_local)
		return fib_trie_get_first(iter, iter->trie_main);

	return NULL;
}

static void fib_trie_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void seq_indent(struct seq_file *seq, int n)
{
	while (n-- > 0)
		seq_puts(seq, " ");
}

static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
{
	switch (s) {
	case RT_SCOPE_UNIVERSE: return "universe";
	case RT_SCOPE_SITE: return "site";
	case RT_SCOPE_LINK: return "link";
	case RT_SCOPE_HOST: return "host";
	case RT_SCOPE_NOWHERE: return "nowhere";
	default:
		snprintf(buf, len, "scope=%d", s);
		return buf;
	}
}

static const char *rtn_type_names[__RTN_MAX] = {
	[RTN_UNSPEC] = "UNSPEC",
	[RTN_UNICAST] = "UNICAST",
	[RTN_LOCAL] = "LOCAL",
	[RTN_BROADCAST] = "BROADCAST",
	[RTN_ANYCAST] = "ANYCAST",
	[RTN_MULTICAST] = "MULTICAST",
	[RTN_BLACKHOLE] = "BLACKHOLE",
	[RTN_UNREACHABLE] = "UNREACHABLE",
	[RTN_PROHIBIT] = "PROHIBIT",
	[RTN_THROW] = "THROW",
	[RTN_NAT] = "NAT",
	[RTN_XRESOLVE] = "XRESOLVE",
};

static inline const char *rtn_type(char *buf, size_t len, unsigned t)
{
	if (t < __RTN_MAX && rtn_type_names[t])
		return rtn_type_names[t];
	snprintf(buf, len, "type %u", t);
	return buf;
}

/* Pretty print the trie */
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
	const struct fib_trie_iter *iter = seq->private;
	struct node *n = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	if (!node_parent_rcu(n)) {
		if (iter->trie == iter->trie_local)
			seq_puts(seq, "<local>:\n");
		else
			seq_puts(seq, "<main>:\n");
	}

	if (IS_TNODE(n)) {
		struct tnode *tn = (struct tnode *) n;
		__be32 prf = htonl(mask_pfx(tn->key, tn->pos));

		seq_indent(seq, iter->depth-1);
		seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
			   NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
			   tn->empty_children);

	} else {
		struct leaf *l = (struct leaf *) n;
		struct leaf_info *li;
		struct hlist_node *node;
		__be32 val = htonl(l->key);

		seq_indent(seq, iter->depth);
		seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));

		hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
			struct fib_alias *fa;

			list_for_each_entry_rcu(fa, &li->falh, fa_list) {
				char buf1[32], buf2[32];

				seq_indent(seq, iter->depth+1);
				seq_printf(seq, " /%d %s %s", li->plen,
					   rtn_scope(buf1, sizeof(buf1),
						     fa->fa_scope),
					   rtn_type(buf2, sizeof(buf2),
						    fa->fa_type));
				if (fa->fa_tos)
					seq_printf(seq, " tos=%d", fa->fa_tos);
				seq_putc(seq, '\n');
			}
		}
	}

	return 0;
}

static const struct seq_operations fib_trie_seq_ops = {
	.start = fib_trie_seq_start,
	.next  = fib_trie_seq_next,
	.stop  = fib_trie_seq_stop,
	.show  = fib_trie_seq_show,
};

static int fib_trie_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_trie_seq_ops,
			    sizeof(struct fib_trie_iter));
}

static const struct file_operations fib_trie_fops = {
	.owner	 = THIS_MODULE,
	.open	 = fib_trie_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
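
/*
 * Iterator state for /proc/net/route: 'pos' and 'key' remember the last
 * leaf that was handed out, so the next read can resume near it instead
 * of rescanning the main trie from its first leaf.
 */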
struct fib_route_iter {
	struct seq_net_private p;
	struct trie *main_trie;
	loff_t	pos;
	t_key	key;
};

static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
{
	struct leaf *l = NULL;
	struct trie *t = iter->main_trie;

	/* use cached location of last found key */
	if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
		pos -= iter->pos;
	else {
		iter->pos = 0;
		l = trie_firstleaf(t);
	}

	while (l && pos-- > 0) {
		iter->pos++;
		l = trie_nextleaf(l);
	}

	if (l)
		iter->key = l->key;	/* remember the leaf's key for the cache */
	else
		iter->pos = 0;		/* forget it */

	return l;
}

static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct fib_route_iter *iter = seq->private;
	struct fib_table *tb;

	rcu_read_lock();
	tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
	if (!tb)
		return NULL;

	iter->main_trie = (struct trie *) tb->tb_data;
	if (*pos == 0)
		return SEQ_START_TOKEN;
	else
		return fib_route_get_idx(iter, *pos - 1);
}

static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_route_iter *iter = seq->private;
	struct leaf *l = v;

	++*pos;
	if (v == SEQ_START_TOKEN) {
		iter->pos = 0;
		l = trie_firstleaf(iter->main_trie);
	} else {
		iter->pos++;
		l = trie_nextleaf(l);
	}

	if (l)
		iter->key = l->key;
	else
		iter->pos = 0;

	return l;
}

static void fib_route_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
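
/*
 * Build the RTF_* flag word shown in /proc/net/route: reject routes
 * (RTN_UNREACHABLE, RTN_PROHIBIT) get RTF_REJECT, a gateway nexthop sets
 * RTF_GATEWAY, a /32 netmask sets RTF_HOST, and RTF_UP is always set.
 */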
static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
	static unsigned type2flags[RTN_MAX + 1] = {
		[RTN_UNREACHABLE] = RTF_REJECT,
		[RTN_PROHIBIT] = RTF_REJECT,
	};
	unsigned flags = type2flags[type];

	if (fi && fi->fib_nh->nh_gw)
		flags |= RTF_GATEWAY;
	if (mask == htonl(0xFFFFFFFF))
		flags |= RTF_HOST;
	flags |= RTF_UP;
	return flags;
}

/*
 *	This outputs /proc/net/route.
 *	The format of the file is not supposed to be changed and needs
 *	to be the same as the fib_hash output, to avoid breaking legacy
 *	utilities that parse it.
 */
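/*
 * Each route is emitted as one tab-separated line: the output interface,
 * then destination, gateway and netmask as zero-padded hexadecimal words
 * (formatted by the snprintf() calls below), followed by the flags,
 * refcnt, use, metric, MTU, window and IRTT columns named in the header.
 */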
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
	struct leaf *l = v;
	struct leaf_info *li;
	struct hlist_node *node;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
			   "\tWindow\tIRTT");
		return 0;
	}

	hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
		struct fib_alias *fa;
		__be32 mask, prefix;

		mask = inet_make_mask(li->plen);
		prefix = htonl(l->key);

		list_for_each_entry_rcu(fa, &li->falh, fa_list) {
			const struct fib_info *fi = fa->fa_info;
			unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
			char bf[128];

			if (fa->fa_type == RTN_BROADCAST
			    || fa->fa_type == RTN_MULTICAST)
				continue;

			if (fi)
				snprintf(bf, sizeof(bf),
					 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
					 fi->fib_dev ? fi->fib_dev->name : "*",
					 prefix,
					 fi->fib_nh->nh_gw, flags, 0, 0,
					 fi->fib_priority,
					 mask,
					 (fi->fib_advmss ?
					  fi->fib_advmss + 40 : 0),
					 fi->fib_window,
					 fi->fib_rtt >> 3);
			else
				snprintf(bf, sizeof(bf),
					 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
					 prefix, 0, flags, 0, 0, 0,
					 mask, 0, 0, 0);

			seq_printf(seq, "%-127s\n", bf);
		}
	}

	return 0;
}

static const struct seq_operations fib_route_seq_ops = {
	.start = fib_route_seq_start,
	.next  = fib_route_seq_next,
	.stop  = fib_route_seq_stop,
	.show  = fib_route_seq_show,
};

static int fib_route_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_route_seq_ops,
			    sizeof(struct fib_route_iter));
}

static const struct file_operations fib_route_fops = {
	.owner	 = THIS_MODULE,
	.open	 = fib_route_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
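
/*
 * Register the per-namespace proc entries (/proc/net/fib_trie,
 * /proc/net/fib_triestat and /proc/net/route); on failure, the entries
 * created so far are removed again and -ENOMEM is returned.
 */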
int __net_init fib_proc_init(struct net *net)
{
	if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
		goto out1;

	if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
				  &fib_triestat_fops))
		goto out2;

	if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
		goto out3;

	return 0;

out3:
	proc_net_remove(net, "fib_triestat");
out2:
	proc_net_remove(net, "fib_trie");
out1:
	return -ENOMEM;
}

void __net_exit fib_proc_exit(struct net *net)
{
	proc_net_remove(net, "fib_trie");
	proc_net_remove(net, "fib_triestat");
	proc_net_remove(net, "route");
}

#endif /* CONFIG_PROC_FS */