[IPV4] fib_trie: style cleanup
linux-2.6.git: net/ipv4/fib_trie.c
1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
28 * Code from fib_hash has been reused which includes the following header:
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
35 * IPv4 FIB: lookup engine and maintenance routines.
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
45 * Substantial contributions to this work come from:
47 * David S. Miller, <davem@davemloft.net>
48 * Stephen Hemminger <shemminger@osdl.org>
49 * Paul E. McKenney <paulmck@us.ibm.com>
50 * Patrick McHardy <kaber@trash.net>
53 #define VERSION "0.408"
55 #include <asm/uaccess.h>
56 #include <asm/system.h>
57 #include <linux/bitops.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
60 #include <linux/mm.h>
61 #include <linux/string.h>
62 #include <linux/socket.h>
63 #include <linux/sockios.h>
64 #include <linux/errno.h>
65 #include <linux/in.h>
66 #include <linux/inet.h>
67 #include <linux/inetdevice.h>
68 #include <linux/netdevice.h>
69 #include <linux/if_arp.h>
70 #include <linux/proc_fs.h>
71 #include <linux/rcupdate.h>
72 #include <linux/skbuff.h>
73 #include <linux/netlink.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <net/net_namespace.h>
77 #include <net/ip.h>
78 #include <net/protocol.h>
79 #include <net/route.h>
80 #include <net/tcp.h>
81 #include <net/sock.h>
82 #include <net/ip_fib.h>
83 #include "fib_lookup.h"
85 #define MAX_STAT_DEPTH 32
87 #define KEYLENGTH (8*sizeof(t_key))
89 typedef unsigned int t_key;
91 #define T_TNODE 0
92 #define T_LEAF 1
93 #define NODE_TYPE_MASK 0x1UL
94 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
96 #define IS_TNODE(n) (!(n->parent & T_LEAF))
97 #define IS_LEAF(n) (n->parent & T_LEAF)
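/* Editor's illustration (not part of the original file; the address is
 * hypothetical): tnodes and leaves are at least pointer-aligned, so bit 0
 * of their address is always zero and is reused as the node type tag.
 * With a parent tnode at 0xffff810012345670:
 *
 *   leaf->parent  = 0xffff810012345670 | T_LEAF    ->  0xffff810012345671
 *   IS_LEAF(leaf)                                  ->  non-zero (bit 0 set)
 *   node_parent() = parent & ~NODE_TYPE_MASK       ->  0xffff810012345670
 */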
99 struct node {
100 unsigned long parent;
101 t_key key;
104 struct leaf {
105 unsigned long parent;
106 t_key key;
107 struct hlist_head list;
108 struct rcu_head rcu;
111 struct leaf_info {
112 struct hlist_node hlist;
113 struct rcu_head rcu;
114 int plen;
115 struct list_head falh;
118 struct tnode {
119 unsigned long parent;
120 t_key key;
121 unsigned char pos; /* log2(KEYLENGTH) bits needed */
122 unsigned char bits; /* log2(KEYLENGTH) bits needed */
123 unsigned int full_children; /* KEYLENGTH bits needed */
124 unsigned int empty_children; /* KEYLENGTH bits needed */
125 struct rcu_head rcu;
126 struct node *child[0];
129 #ifdef CONFIG_IP_FIB_TRIE_STATS
130 struct trie_use_stats {
131 unsigned int gets;
132 unsigned int backtrack;
133 unsigned int semantic_match_passed;
134 unsigned int semantic_match_miss;
135 unsigned int null_node_hit;
136 unsigned int resize_node_skipped;
138 #endif
140 struct trie_stat {
141 unsigned int totdepth;
142 unsigned int maxdepth;
143 unsigned int tnodes;
144 unsigned int leaves;
145 unsigned int nullpointers;
146 unsigned int nodesizes[MAX_STAT_DEPTH];
149 struct trie {
150 struct node *trie;
151 unsigned int size;
152 #ifdef CONFIG_IP_FIB_TRIE_STATS
153 struct trie_use_stats stats;
154 #endif
157 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
158 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
159 int wasfull);
160 static struct node *resize(struct trie *t, struct tnode *tn);
161 static struct tnode *inflate(struct trie *t, struct tnode *tn);
162 static struct tnode *halve(struct trie *t, struct tnode *tn);
163 static void tnode_free(struct tnode *tn);
165 static struct kmem_cache *fn_alias_kmem __read_mostly;
166 static struct kmem_cache *trie_leaf_kmem __read_mostly;
168 static inline struct tnode *node_parent(struct node *node)
170 return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
173 static inline struct tnode *node_parent_rcu(struct node *node)
175 struct tnode *ret = node_parent(node);
177 return rcu_dereference(ret);
180 static inline void node_set_parent(struct node *node, struct tnode *ptr)
182 rcu_assign_pointer(node->parent,
183 (unsigned long)ptr | NODE_TYPE(node));
186 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
188 BUG_ON(i >= 1U << tn->bits);
190 return tn->child[i];
193 static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
195 struct node *ret = tnode_get_child(tn, i);
197 return rcu_dereference(ret);
200 static inline int tnode_child_length(const struct tnode *tn)
202 return 1 << tn->bits;
205 static inline t_key mask_pfx(t_key k, unsigned short l)
207 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
210 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
212 if (offset < KEYLENGTH)
213 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
214 else
215 return 0;
218 static inline int tkey_equals(t_key a, t_key b)
220 return a == b;
223 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
225 if (bits == 0 || offset >= KEYLENGTH)
226 return 1;
227 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
228 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
231 static inline int tkey_mismatch(t_key a, int offset, t_key b)
233 t_key diff = a ^ b;
234 int i = offset;
236 if (!diff)
237 return 0;
238 while ((diff << i) >> (KEYLENGTH-1) == 0)
239 i++;
240 return i;
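/* Editor's illustration (hypothetical values): t_key holds a host-order
 * IPv4 address and bit 0 is the most significant bit.  For key
 * 0x0a000000 (10.0.0.0):
 *
 *   tkey_extract_bits(0x0a000000, 0, 8)      == 0x0a   (the first octet)
 *   tkey_extract_bits(0x0a000000, 4, 4)      == 0xa    (bits 4..7)
 *   mask_pfx(0x0a010203, 8)                  == 0x0a000000
 *   tkey_mismatch(0x0a000000, 0, 0x0b000000) == 7      (first differing bit)
 */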
244 To understand this stuff, an understanding of keys and all their bits is
245 necessary. Every node in the trie has a key associated with it, but not
246 all of the bits in that key are significant.
248 Consider a node 'n' and its parent 'tp'.
250 If n is a leaf, every bit in its key is significant. Its presence is
251 necessitated by path compression, since during a tree traversal (when
252 searching for a leaf - unless we are doing an insertion) we will completely
253 ignore all skipped bits we encounter. Thus we need to verify, at the end of
254 a potentially successful search, that we have indeed been walking the
255 correct key path.
257 Note that we can never "miss" the correct key in the tree if present by
258 following the wrong path. Path compression ensures that segments of the key
259 that are the same for all keys with a given prefix are skipped, but the
260 skipped part *is* identical for each node in the subtrie below the skipped
261 bit! fib_insert_node() in this implementation takes care of that - note the
262 call to tkey_sub_equals() in fib_insert_node().
264 if n is an internal node - a 'tnode' here, the various parts of its key
265 have many different meanings.
267 Example:
268 _________________________________________________________________
269 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
270 -----------------------------------------------------------------
271 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
273 _________________________________________________________________
274 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
275 -----------------------------------------------------------------
276 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
278 tp->pos = 7
279 tp->bits = 3
280 n->pos = 15
281 n->bits = 4
283 First, let's just ignore the bits that come before the parent tp, that is
284 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
285 not use them for anything.
287 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
288 index into the parent's child array. That is, they will be used to find
289 'n' among tp's children.
291 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
292 for the node n.
294 All the bits we have seen so far are significant to the node n. The rest
295 of the bits are really not needed or indeed known in n->key.
297 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
298 n's child array, and will of course be different for each child.
301 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
302 at this point.
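/* Editor's note, tying the example above to the helpers (same values):
 * with tp->pos = 7, tp->bits = 3, n->pos = 15, n->bits = 4,
 *
 *   tkey_extract_bits(n->key, tp->pos, tp->bits) - the "N" bits 7..9 -
 *     is n's slot in tp's child array, and
 *   tkey_extract_bits(key, n->pos, n->bits)      - the "C" bits 15..18 -
 *     selects which child of n a search for 'key' descends into.
 *
 * The "S" bits 10..14 are the ones path compression skips, which is why
 * the final tkey_equals()/tkey_sub_equals() verification is needed.
 */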
306 static inline void check_tnode(const struct tnode *tn)
308 WARN_ON(tn && tn->pos+tn->bits > 32);
311 static const int halve_threshold = 25;
312 static const int inflate_threshold = 50;
313 static const int halve_threshold_root = 8;
314 static const int inflate_threshold_root = 15;
317 static void __alias_free_mem(struct rcu_head *head)
319 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
320 kmem_cache_free(fn_alias_kmem, fa);
323 static inline void alias_free_mem_rcu(struct fib_alias *fa)
325 call_rcu(&fa->rcu, __alias_free_mem);
328 static void __leaf_free_rcu(struct rcu_head *head)
330 struct leaf *l = container_of(head, struct leaf, rcu);
331 kmem_cache_free(trie_leaf_kmem, l);
334 static void __leaf_info_free_rcu(struct rcu_head *head)
336 kfree(container_of(head, struct leaf_info, rcu));
339 static inline void free_leaf_info(struct leaf_info *leaf)
341 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
344 static struct tnode *tnode_alloc(size_t size)
346 struct page *pages;
348 if (size <= PAGE_SIZE)
349 return kzalloc(size, GFP_KERNEL);
351 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
352 if (!pages)
353 return NULL;
355 return page_address(pages);
358 static void __tnode_free_rcu(struct rcu_head *head)
360 struct tnode *tn = container_of(head, struct tnode, rcu);
361 size_t size = sizeof(struct tnode) +
362 (sizeof(struct node *) << tn->bits);
364 if (size <= PAGE_SIZE)
365 kfree(tn);
366 else
367 free_pages((unsigned long)tn, get_order(size));
370 static inline void tnode_free(struct tnode *tn)
372 if (IS_LEAF(tn)) {
373 struct leaf *l = (struct leaf *) tn;
374 call_rcu_bh(&l->rcu, __leaf_free_rcu);
375 } else
376 call_rcu(&tn->rcu, __tnode_free_rcu);
379 static struct leaf *leaf_new(void)
381 struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
382 if (l) {
383 l->parent = T_LEAF;
384 INIT_HLIST_HEAD(&l->list);
386 return l;
389 static struct leaf_info *leaf_info_new(int plen)
391 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
392 if (li) {
393 li->plen = plen;
394 INIT_LIST_HEAD(&li->falh);
396 return li;
399 static struct tnode *tnode_new(t_key key, int pos, int bits)
401 size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
402 struct tnode *tn = tnode_alloc(sz);
404 if (tn) {
405 tn->parent = T_TNODE;
406 tn->pos = pos;
407 tn->bits = bits;
408 tn->key = key;
409 tn->full_children = 0;
410 tn->empty_children = 1<<bits;
413 pr_debug("AT %p s=%u %lu\n", tn, (unsigned int) sizeof(struct tnode),
414 (unsigned long) (sizeof(struct node) << bits));
415 return tn;
419 * Check whether a tnode 'n' is "full", i.e. it is an internal node
420 * and no bits are skipped. See discussion in dyntree paper p. 6
423 static inline int tnode_full(const struct tnode *tn, const struct node *n)
425 if (n == NULL || IS_LEAF(n))
426 return 0;
428 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
431 static inline void put_child(struct trie *t, struct tnode *tn, int i,
432 struct node *n)
434 tnode_put_child_reorg(tn, i, n, -1);
438 * Add a child at position i overwriting the old value.
439 * Update the value of full_children and empty_children.
442 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
443 int wasfull)
445 struct node *chi = tn->child[i];
446 int isfull;
448 BUG_ON(i >= 1<<tn->bits);
451 /* update emptyChildren */
452 if (n == NULL && chi != NULL)
453 tn->empty_children++;
454 else if (n != NULL && chi == NULL)
455 tn->empty_children--;
457 /* update fullChildren */
458 if (wasfull == -1)
459 wasfull = tnode_full(tn, chi);
461 isfull = tnode_full(tn, n);
462 if (wasfull && !isfull)
463 tn->full_children--;
464 else if (!wasfull && isfull)
465 tn->full_children++;
467 if (n)
468 node_set_parent(n, tn);
470 rcu_assign_pointer(tn->child[i], n);
473 static struct node *resize(struct trie *t, struct tnode *tn)
475 int i;
476 int err = 0;
477 struct tnode *old_tn;
478 int inflate_threshold_use;
479 int halve_threshold_use;
480 int max_resize;
482 if (!tn)
483 return NULL;
485 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
486 tn, inflate_threshold, halve_threshold);
488 /* No children */
489 if (tn->empty_children == tnode_child_length(tn)) {
490 tnode_free(tn);
491 return NULL;
493 /* One child */
494 if (tn->empty_children == tnode_child_length(tn) - 1)
495 for (i = 0; i < tnode_child_length(tn); i++) {
496 struct node *n;
498 n = tn->child[i];
499 if (!n)
500 continue;
502 /* compress one level */
503 node_set_parent(n, NULL);
504 tnode_free(tn);
505 return n;
508 * Double as long as the resulting node has a number of
509 * nonempty nodes that is above the threshold.
513 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
514 * the Helsinki University of Technology and Matti Tikkanen of Nokia
515 * Telecommunications, page 6:
516 * "A node is doubled if the ratio of non-empty children to all
517 * children in the *doubled* node is at least 'high'."
519 * 'high' in this instance is the variable 'inflate_threshold'. It
520 * is expressed as a percentage, so we multiply it with
521 * tnode_child_length() and instead of multiplying by 2 (since the
522 * child array will be doubled by inflate()) and multiplying
523 * the left-hand side by 100 (to handle the percentage thing) we
524 * multiply the left-hand side by 50.
526 * The left-hand side may look a bit weird: tnode_child_length(tn)
527 * - tn->empty_children is of course the number of non-null children
528 * in the current node. tn->full_children is the number of "full"
529 * children, that is non-null tnodes with a skip value of 0.
530 * All of those will be doubled in the resulting inflated tnode, so
531 * we just count them one extra time here.
533 * A clearer way to write this would be:
535 * to_be_doubled = tn->full_children;
536 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
537 * tn->full_children;
539 * new_child_length = tnode_child_length(tn) * 2;
541 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
542 * new_child_length;
543 * if (new_fill_factor >= inflate_threshold)
545 * ...and so on, though it would mess up the while () loop.
547 * anyway,
548 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
549 * inflate_threshold
551 * avoid a division:
552 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
553 * inflate_threshold * new_child_length
555 * expand not_to_be_doubled and to_be_doubled, and shorten:
556 * 100 * (tnode_child_length(tn) - tn->empty_children +
557 * tn->full_children) >= inflate_threshold * new_child_length
559 * expand new_child_length:
560 * 100 * (tnode_child_length(tn) - tn->empty_children +
561 * tn->full_children) >=
562 * inflate_threshold * tnode_child_length(tn) * 2
564 * shorten again:
565 * 50 * (tn->full_children + tnode_child_length(tn) -
566 * tn->empty_children) >= inflate_threshold *
567 * tnode_child_length(tn)
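/* Editor's illustration (hypothetical numbers): a non-root tnode with
 * bits = 4 has tnode_child_length() = 16 slots.  Say 4 of them are empty
 * and 6 children are "full" tnodes.  Then
 *
 *   50 * (6 + 16 - 4) = 900  >=  inflate_threshold(50) * 16 = 800
 *
 * so the node is inflated; equivalently, the doubled node would be
 * 100 * (6 + 2*6) / 32 = 56% occupied, above the 50% threshold.
 */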
571 check_tnode(tn);
573 /* Keep root node larger */
575 if (!tn->parent)
576 inflate_threshold_use = inflate_threshold_root;
577 else
578 inflate_threshold_use = inflate_threshold;
580 err = 0;
581 max_resize = 10;
582 while ((tn->full_children > 0 && max_resize-- &&
583 50 * (tn->full_children + tnode_child_length(tn)
584 - tn->empty_children)
585 >= inflate_threshold_use * tnode_child_length(tn))) {
587 old_tn = tn;
588 tn = inflate(t, tn);
590 if (IS_ERR(tn)) {
591 tn = old_tn;
592 #ifdef CONFIG_IP_FIB_TRIE_STATS
593 t->stats.resize_node_skipped++;
594 #endif
595 break;
599 if (max_resize < 0) {
600 if (!tn->parent)
601 pr_warning("Fix inflate_threshold_root."
602 " Now=%d size=%d bits\n",
603 inflate_threshold_root, tn->bits);
604 else
605 pr_warning("Fix inflate_threshold."
606 " Now=%d size=%d bits\n",
607 inflate_threshold, tn->bits);
610 check_tnode(tn);
613 * Halve as long as the number of empty children in this
614 * node is above threshold.
618 /* Keep root node larger */
620 if (!tn->parent)
621 halve_threshold_use = halve_threshold_root;
622 else
623 halve_threshold_use = halve_threshold;
625 err = 0;
626 max_resize = 10;
627 while (tn->bits > 1 && max_resize-- &&
628 100 * (tnode_child_length(tn) - tn->empty_children) <
629 halve_threshold_use * tnode_child_length(tn)) {
631 old_tn = tn;
632 tn = halve(t, tn);
633 if (IS_ERR(tn)) {
634 tn = old_tn;
635 #ifdef CONFIG_IP_FIB_TRIE_STATS
636 t->stats.resize_node_skipped++;
637 #endif
638 break;
642 if (max_resize < 0) {
643 if (!tn->parent)
644 pr_warning("Fix halve_threshold_root."
645 " Now=%d size=%d bits\n",
646 halve_threshold_root, tn->bits);
647 else
648 pr_warning("Fix halve_threshold."
649 " Now=%d size=%d bits\n",
650 halve_threshold, tn->bits);
653 /* Only one child remains */
654 if (tn->empty_children == tnode_child_length(tn) - 1)
655 for (i = 0; i < tnode_child_length(tn); i++) {
656 struct node *n;
658 n = tn->child[i];
659 if (!n)
660 continue;
662 /* compress one level */
664 node_set_parent(n, NULL);
665 tnode_free(tn);
666 return n;
669 return (struct node *) tn;
672 static struct tnode *inflate(struct trie *t, struct tnode *tn)
674 struct tnode *oldtnode = tn;
675 int olen = tnode_child_length(tn);
676 int i;
678 pr_debug("In inflate\n");
680 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
682 if (!tn)
683 return ERR_PTR(-ENOMEM);
686 * Preallocate and store tnodes before the actual work so we
687 * don't get into an inconsistent state if memory allocation
688 * fails. In case of failure we return the old node and the inflate
689 * of this tnode is abandoned.
692 for (i = 0; i < olen; i++) {
693 struct tnode *inode;
695 inode = (struct tnode *) tnode_get_child(oldtnode, i);
696 if (inode &&
697 IS_TNODE(inode) &&
698 inode->pos == oldtnode->pos + oldtnode->bits &&
699 inode->bits > 1) {
700 struct tnode *left, *right;
701 t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
703 left = tnode_new(inode->key&(~m), inode->pos + 1,
704 inode->bits - 1);
705 if (!left)
706 goto nomem;
708 right = tnode_new(inode->key|m, inode->pos + 1,
709 inode->bits - 1);
711 if (!right) {
712 tnode_free(left);
713 goto nomem;
716 put_child(t, tn, 2*i, (struct node *) left);
717 put_child(t, tn, 2*i+1, (struct node *) right);
721 for (i = 0; i < olen; i++) {
722 struct tnode *inode;
723 struct node *node = tnode_get_child(oldtnode, i);
724 struct tnode *left, *right;
725 int size, j;
727 /* An empty child */
728 if (node == NULL)
729 continue;
731 /* A leaf or an internal node with skipped bits */
733 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
734 tn->pos + tn->bits - 1) {
735 if (tkey_extract_bits(node->key,
736 oldtnode->pos + oldtnode->bits,
737 1) == 0)
738 put_child(t, tn, 2*i, node);
739 else
740 put_child(t, tn, 2*i+1, node);
741 continue;
744 /* An internal node with two children */
745 inode = (struct tnode *) node;
747 if (inode->bits == 1) {
748 put_child(t, tn, 2*i, inode->child[0]);
749 put_child(t, tn, 2*i+1, inode->child[1]);
751 tnode_free(inode);
752 continue;
755 /* An internal node with more than two children */
757 /* We will replace this node 'inode' with two new
758 * ones, 'left' and 'right', each with half of the
759 * original children. The two new nodes will have
760 * a position one bit further down the key and this
761 * means that the "significant" part of their keys
762 * (see the discussion near the top of this file)
763 * will differ by one bit, which will be "0" in
764 * left's key and "1" in right's key. Since we are
765 * moving the key position by one step, the bit that
766 * we are moving away from - the bit at position
767 * (inode->pos) - is the one that will differ between
768 * left and right. So... we synthesize that bit in the
769 * two new keys.
770 * The mask 'm' below will be a single "one" bit at
771 * the position (inode->pos)
774 /* Use the old key, but set the new significant
775 * bit to zero.
778 left = (struct tnode *) tnode_get_child(tn, 2*i);
779 put_child(t, tn, 2*i, NULL);
781 BUG_ON(!left);
783 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
784 put_child(t, tn, 2*i+1, NULL);
786 BUG_ON(!right);
788 size = tnode_child_length(left);
789 for (j = 0; j < size; j++) {
790 put_child(t, left, j, inode->child[j]);
791 put_child(t, right, j, inode->child[j + size]);
793 put_child(t, tn, 2*i, resize(t, left));
794 put_child(t, tn, 2*i+1, resize(t, right));
796 tnode_free(inode);
798 tnode_free(oldtnode);
799 return tn;
800 nomem:
802 int size = tnode_child_length(tn);
803 int j;
805 for (j = 0; j < size; j++)
806 if (tn->child[j])
807 tnode_free((struct tnode *)tn->child[j]);
809 tnode_free(tn);
811 return ERR_PTR(-ENOMEM);
815 static struct tnode *halve(struct trie *t, struct tnode *tn)
817 struct tnode *oldtnode = tn;
818 struct node *left, *right;
819 int i;
820 int olen = tnode_child_length(tn);
822 pr_debug("In halve\n");
824 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
826 if (!tn)
827 return ERR_PTR(-ENOMEM);
830 * Preallocate and store tnodes before the actual work so we
831 * don't get into an inconsistent state if memory allocation
832 * fails. In case of failure we return the old node and the halve
833 * of this tnode is abandoned.
836 for (i = 0; i < olen; i += 2) {
837 left = tnode_get_child(oldtnode, i);
838 right = tnode_get_child(oldtnode, i+1);
840 /* Two nonempty children */
841 if (left && right) {
842 struct tnode *newn;
844 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
846 if (!newn)
847 goto nomem;
849 put_child(t, tn, i/2, (struct node *)newn);
854 for (i = 0; i < olen; i += 2) {
855 struct tnode *newBinNode;
857 left = tnode_get_child(oldtnode, i);
858 right = tnode_get_child(oldtnode, i+1);
860 /* At least one of the children is empty */
861 if (left == NULL) {
862 if (right == NULL) /* Both are empty */
863 continue;
864 put_child(t, tn, i/2, right);
865 continue;
868 if (right == NULL) {
869 put_child(t, tn, i/2, left);
870 continue;
873 /* Two nonempty children */
874 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
875 put_child(t, tn, i/2, NULL);
876 put_child(t, newBinNode, 0, left);
877 put_child(t, newBinNode, 1, right);
878 put_child(t, tn, i/2, resize(t, newBinNode));
880 tnode_free(oldtnode);
881 return tn;
882 nomem:
884 int size = tnode_child_length(tn);
885 int j;
887 for (j = 0; j < size; j++)
888 if (tn->child[j])
889 tnode_free((struct tnode *)tn->child[j]);
891 tnode_free(tn);
893 return ERR_PTR(-ENOMEM);
897 /* The read side must use rcu_read_lock; currently only the dump
898 routines do so, via get_fa_head and dump */
900 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
902 struct hlist_head *head = &l->list;
903 struct hlist_node *node;
904 struct leaf_info *li;
906 hlist_for_each_entry_rcu(li, node, head, hlist)
907 if (li->plen == plen)
908 return li;
910 return NULL;
913 static inline struct list_head *get_fa_head(struct leaf *l, int plen)
915 struct leaf_info *li = find_leaf_info(l, plen);
917 if (!li)
918 return NULL;
920 return &li->falh;
923 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
925 struct leaf_info *li = NULL, *last = NULL;
926 struct hlist_node *node;
928 if (hlist_empty(head)) {
929 hlist_add_head_rcu(&new->hlist, head);
930 } else {
931 hlist_for_each_entry(li, node, head, hlist) {
932 if (new->plen > li->plen)
933 break;
935 last = li;
937 if (last)
938 hlist_add_after_rcu(&last->hlist, &new->hlist);
939 else
940 hlist_add_before_rcu(&new->hlist, &li->hlist);
944 /* rcu_read_lock needs to be held by the caller on the read side */
946 static struct leaf *
947 fib_find_node(struct trie *t, u32 key)
949 int pos;
950 struct tnode *tn;
951 struct node *n;
953 pos = 0;
954 n = rcu_dereference(t->trie);
956 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
957 tn = (struct tnode *) n;
959 check_tnode(tn);
961 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
962 pos = tn->pos + tn->bits;
963 n = tnode_get_child_rcu(tn,
964 tkey_extract_bits(key,
965 tn->pos,
966 tn->bits));
967 } else
968 break;
970 /* Case we have found a leaf. Compare prefixes */
972 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
973 return (struct leaf *)n;
975 return NULL;
978 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
980 int wasfull;
981 t_key cindex, key = tn->key;
982 struct tnode *tp;
984 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
985 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
986 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
987 tn = (struct tnode *) resize(t, (struct tnode *)tn);
989 tnode_put_child_reorg((struct tnode *)tp, cindex,
990 (struct node *)tn, wasfull);
992 tp = node_parent((struct node *) tn);
993 if (!tp)
994 break;
995 tn = tp;
998 /* Handle last (top) tnode */
999 if (IS_TNODE(tn))
1000 tn = (struct tnode *)resize(t, (struct tnode *)tn);
1002 return (struct node *)tn;
1005 /* only used from updater-side */
1007 static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1009 int pos, newpos;
1010 struct tnode *tp = NULL, *tn = NULL;
1011 struct node *n;
1012 struct leaf *l;
1013 int missbit;
1014 struct list_head *fa_head = NULL;
1015 struct leaf_info *li;
1016 t_key cindex;
1018 pos = 0;
1019 n = t->trie;
1021 /* If we point to NULL, stop. Either the tree is empty and we should
1022 * just put a new leaf in it, or we have reached an empty child slot,
1023 * and we should just put our new leaf in that.
1024 * If we point to a T_TNODE, check if it matches our key. Note that
1025 * a T_TNODE might be skipping any number of bits - its 'pos' need
1026 * not be the parent's 'pos'+'bits'!
1028 * If it does match the current key, get pos/bits from it, extract
1029 * the index from our key, push the T_TNODE and walk the tree.
1031 * If it doesn't, we have to replace it with a new T_TNODE.
1033 * If we point to a T_LEAF, it might or might not have the same key
1034 * as we do. If it does, we just update the T_LEAF's value and
1035 * return it.
1036 * If it doesn't, we need to replace it with a T_TNODE.
1039 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1040 tn = (struct tnode *) n;
1042 check_tnode(tn);
1044 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1045 tp = tn;
1046 pos = tn->pos + tn->bits;
1047 n = tnode_get_child(tn,
1048 tkey_extract_bits(key,
1049 tn->pos,
1050 tn->bits));
1052 BUG_ON(n && node_parent(n) != tn);
1053 } else
1054 break;
1058 * n ----> NULL, LEAF or TNODE
1060 * tp is n's (parent) ----> NULL or TNODE
1063 BUG_ON(tp && IS_LEAF(tp));
1065 /* Case 1: n is a leaf. Compare prefixes */
1067 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1068 l = (struct leaf *) n;
1069 li = leaf_info_new(plen);
1071 if (!li)
1072 return NULL;
1074 fa_head = &li->falh;
1075 insert_leaf_info(&l->list, li);
1076 goto done;
1078 l = leaf_new();
1080 if (!l)
1081 return NULL;
1083 l->key = key;
1084 li = leaf_info_new(plen);
1086 if (!li) {
1087 tnode_free((struct tnode *) l);
1088 return NULL;
1091 fa_head = &li->falh;
1092 insert_leaf_info(&l->list, li);
1094 if (t->trie && n == NULL) {
1095 /* Case 2: n is NULL, and will just insert a new leaf */
1097 node_set_parent((struct node *)l, tp);
1099 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1100 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1101 } else {
1102 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1104 * Add a new tnode here
1105 * the first tnode needs some special handling
1108 if (tp)
1109 pos = tp->pos+tp->bits;
1110 else
1111 pos = 0;
1113 if (n) {
1114 newpos = tkey_mismatch(key, pos, n->key);
1115 tn = tnode_new(n->key, newpos, 1);
1116 } else {
1117 newpos = 0;
1118 tn = tnode_new(key, newpos, 1); /* First tnode */
1121 if (!tn) {
1122 free_leaf_info(li);
1123 tnode_free((struct tnode *) l);
1124 return NULL;
1127 node_set_parent((struct node *)tn, tp);
1129 missbit = tkey_extract_bits(key, newpos, 1);
1130 put_child(t, tn, missbit, (struct node *)l);
1131 put_child(t, tn, 1-missbit, n);
1133 if (tp) {
1134 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1135 put_child(t, (struct tnode *)tp, cindex,
1136 (struct node *)tn);
1137 } else {
1138 rcu_assign_pointer(t->trie, (struct node *)tn);
1139 tp = tn;
1143 if (tp && tp->pos + tp->bits > 32)
1144 pr_warning("fib_trie"
1145 " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1146 tp, tp->pos, tp->bits, key, plen);
1148 /* Rebalance the trie */
1150 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1151 done:
1152 return fa_head;
1156 * Caller must hold RTNL.
1158 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1160 struct trie *t = (struct trie *) tb->tb_data;
1161 struct fib_alias *fa, *new_fa;
1162 struct list_head *fa_head = NULL;
1163 struct fib_info *fi;
1164 int plen = cfg->fc_dst_len;
1165 u8 tos = cfg->fc_tos;
1166 u32 key, mask;
1167 int err;
1168 struct leaf *l;
1170 if (plen > 32)
1171 return -EINVAL;
1173 key = ntohl(cfg->fc_dst);
1175 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1177 mask = ntohl(inet_make_mask(plen));
1179 if (key & ~mask)
1180 return -EINVAL;
1182 key = key & mask;
1184 fi = fib_create_info(cfg);
1185 if (IS_ERR(fi)) {
1186 err = PTR_ERR(fi);
1187 goto err;
1190 l = fib_find_node(t, key);
1191 fa = NULL;
1193 if (l) {
1194 fa_head = get_fa_head(l, plen);
1195 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1198 /* Now fa, if non-NULL, points to the first fib alias
1199 * with the same keys [prefix,tos,priority], if such key already
1200 * exists or to the node before which we will insert new one.
1202 * If fa is NULL, we will need to allocate a new one and
1203 * insert to the head of f.
1205 * If f is NULL, no fib node matched the destination key
1206 * and we need to allocate a new one of those as well.
1209 if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
1210 struct fib_alias *fa_orig;
1212 err = -EEXIST;
1213 if (cfg->fc_nlflags & NLM_F_EXCL)
1214 goto out;
1216 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1217 struct fib_info *fi_drop;
1218 u8 state;
1220 if (fi->fib_treeref > 1)
1221 goto out;
1223 err = -ENOBUFS;
1224 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1225 if (new_fa == NULL)
1226 goto out;
1228 fi_drop = fa->fa_info;
1229 new_fa->fa_tos = fa->fa_tos;
1230 new_fa->fa_info = fi;
1231 new_fa->fa_type = cfg->fc_type;
1232 new_fa->fa_scope = cfg->fc_scope;
1233 state = fa->fa_state;
1234 new_fa->fa_state &= ~FA_S_ACCESSED;
1236 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1237 alias_free_mem_rcu(fa);
1239 fib_release_info(fi_drop);
1240 if (state & FA_S_ACCESSED)
1241 rt_cache_flush(-1);
1242 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1243 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1245 goto succeeded;
1247 /* Error if we find a perfect match which
1248 * uses the same scope, type, and nexthop
1249 * information.
1251 fa_orig = fa;
1252 list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
1253 if (fa->fa_tos != tos)
1254 break;
1255 if (fa->fa_info->fib_priority != fi->fib_priority)
1256 break;
1257 if (fa->fa_type == cfg->fc_type &&
1258 fa->fa_scope == cfg->fc_scope &&
1259 fa->fa_info == fi)
1260 goto out;
1263 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1264 fa = fa_orig;
1266 err = -ENOENT;
1267 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1268 goto out;
1270 err = -ENOBUFS;
1271 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1272 if (new_fa == NULL)
1273 goto out;
1275 new_fa->fa_info = fi;
1276 new_fa->fa_tos = tos;
1277 new_fa->fa_type = cfg->fc_type;
1278 new_fa->fa_scope = cfg->fc_scope;
1279 new_fa->fa_state = 0;
1281 * Insert new entry to the list.
1284 if (!fa_head) {
1285 fa_head = fib_insert_node(t, key, plen);
1286 if (unlikely(!fa_head)) {
1287 err = -ENOMEM;
1288 goto out_free_new_fa;
1292 list_add_tail_rcu(&new_fa->fa_list,
1293 (fa ? &fa->fa_list : fa_head));
1295 t->size++;
1297 rt_cache_flush(-1);
1298 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1299 &cfg->fc_nlinfo, 0);
1300 succeeded:
1301 return 0;
1303 out_free_new_fa:
1304 kmem_cache_free(fn_alias_kmem, new_fa);
1305 out:
1306 fib_release_info(fi);
1307 err:
1308 return err;
1312 /* should be called with rcu_read_lock */
1313 static int check_leaf(struct trie *t, struct leaf *l,
1314 t_key key, const struct flowi *flp,
1315 struct fib_result *res)
1317 struct leaf_info *li;
1318 struct hlist_head *hhead = &l->list;
1319 struct hlist_node *node;
1321 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1322 int err;
1323 int plen = li->plen;
1324 __be32 mask = inet_make_mask(plen);
1326 if (l->key != (key & ntohl(mask)))
1327 continue;
1329 err = fib_semantic_match(&li->falh, flp, res,
1330 htonl(l->key), mask, plen);
1332 #ifdef CONFIG_IP_FIB_TRIE_STATS
1333 if (err <= 0)
1334 t->stats.semantic_match_passed++;
1335 else
1336 t->stats.semantic_match_miss++;
1337 #endif
1338 if (err <= 0)
1339 return plen;
1342 return -1;
1345 static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,
1346 struct fib_result *res)
1348 struct trie *t = (struct trie *) tb->tb_data;
1349 int plen, ret = 0;
1350 struct node *n;
1351 struct tnode *pn;
1352 int pos, bits;
1353 t_key key = ntohl(flp->fl4_dst);
1354 int chopped_off;
1355 t_key cindex = 0;
1356 int current_prefix_length = KEYLENGTH;
1357 struct tnode *cn;
1358 t_key node_prefix, key_prefix, pref_mismatch;
1359 int mp;
1361 rcu_read_lock();
1363 n = rcu_dereference(t->trie);
1364 if (!n)
1365 goto failed;
1367 #ifdef CONFIG_IP_FIB_TRIE_STATS
1368 t->stats.gets++;
1369 #endif
1371 /* Just a leaf? */
1372 if (IS_LEAF(n)) {
1373 plen = check_leaf(t, (struct leaf *)n, key, flp, res);
1374 if (plen < 0)
1375 goto failed;
1376 ret = 0;
1377 goto found;
1380 pn = (struct tnode *) n;
1381 chopped_off = 0;
1383 while (pn) {
1384 pos = pn->pos;
1385 bits = pn->bits;
1387 if (!chopped_off)
1388 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1389 pos, bits);
1391 n = tnode_get_child(pn, cindex);
1393 if (n == NULL) {
1394 #ifdef CONFIG_IP_FIB_TRIE_STATS
1395 t->stats.null_node_hit++;
1396 #endif
1397 goto backtrace;
1400 if (IS_LEAF(n)) {
1401 plen = check_leaf(t, (struct leaf *)n, key, flp, res);
1402 if (plen < 0)
1403 goto backtrace;
1405 ret = 0;
1406 goto found;
1409 cn = (struct tnode *)n;
1412 * It's a tnode, and we can do some extra checks here if we
1413 * like, to avoid descending into a dead-end branch.
1414 * This tnode is in the parent's child array at index
1415 * key[p_pos..p_pos+p_bits] but potentially with some bits
1416 * chopped off, so in reality the index may be just a
1417 * subprefix, padded with zero at the end.
1418 * We can also take a look at any skipped bits in this
1419 * tnode - everything up to p_pos is supposed to be ok,
1420 * and the non-chopped bits of the index (see previous
1421 * paragraph) are also guaranteed ok, but the rest is
1422 * considered unknown.
1424 * The skipped bits are key[pos+bits..cn->pos].
1427 /* If current_prefix_length < pos+bits, we are already doing
1428 * actual prefix matching, which means everything from
1429 * pos+(bits-chopped_off) onward must be zero along some
1430 * branch of this subtree - otherwise there is *no* valid
1431 * prefix present. Here we can only check the skipped
1432 * bits. Remember, since we have already indexed into the
1433 * parent's child array, we know that the bits we chopped off
1434 * *are* zero.
1437 /* NOTA BENE: Checking only skipped bits
1438 for the new node here */
1440 if (current_prefix_length < pos+bits) {
1441 if (tkey_extract_bits(cn->key, current_prefix_length,
1442 cn->pos - current_prefix_length)
1443 || !(cn->child[0]))
1444 goto backtrace;
1448 * If chopped_off=0, the index is fully validated and we
1449 * only need to look at the skipped bits for this, the new,
1450 * tnode. What we actually want to do is to find out if
1451 * these skipped bits match our key perfectly, or if we will
1452 * have to count on finding a matching prefix further down,
1453 * because if we do, we would like to have some way of
1454 * verifying the existence of such a prefix at this point.
1457 /* The only thing we can do at this point is to verify that
1458 * any such matching prefix can indeed be a prefix to our
1459 * key, and if the bits in the node we are inspecting that
1460 * do not match our key are not ZERO, this cannot be true.
1461 * Thus, find out where there is a mismatch (before cn->pos)
1462 * and verify that all the mismatching bits are zero in the
1463 * new tnode's key.
1467 * Note: We aren't very concerned about the piece of
1468 * the key that precedes pn->pos+pn->bits, since these
1469 * have already been checked. The bits after cn->pos
1470 * aren't checked since these are by definition
1471 * "unknown" at this point. Thus, what we want to see
1472 * is if we are about to enter the "prefix matching"
1473 * state, and in that case verify that the skipped
1474 * bits that will prevail throughout this subtree are
1475 * zero, as they have to be if we are to find a
1476 * matching prefix.
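/* Editor's illustration (hypothetical values): suppose cn->pos = 12 and
 * the search key first differs from cn->key at bit 9, i.e. inside the
 * skipped bits.  Then pref_mismatch below has bit 9 as its highest set
 * bit, the while loop leaves mp = 9, and tkey_extract_bits(cn->key, 9, 3)
 * yields bits 9..11 of cn->key.  Only if those bits are all zero can a
 * shorter prefix stored under cn still cover the key; otherwise we
 * backtrack right away.
 */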
1479 node_prefix = mask_pfx(cn->key, cn->pos);
1480 key_prefix = mask_pfx(key, cn->pos);
1481 pref_mismatch = key_prefix^node_prefix;
1482 mp = 0;
1485 * In short: If skipped bits in this node do not match
1486 * the search key, enter the "prefix matching"
1487 * state directly.
1489 if (pref_mismatch) {
1490 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1491 mp++;
1492 pref_mismatch = pref_mismatch << 1;
1494 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1496 if (key_prefix != 0)
1497 goto backtrace;
1499 if (current_prefix_length >= cn->pos)
1500 current_prefix_length = mp;
1503 pn = (struct tnode *)n; /* Descend */
1504 chopped_off = 0;
1505 continue;
1507 backtrace:
1508 chopped_off++;
1510 /* Zero bits don't change the child key (cindex), so skip them */
1511 while ((chopped_off <= pn->bits)
1512 && !(cindex & (1<<(chopped_off-1))))
1513 chopped_off++;
1515 /* Decrease current_... with bits chopped off */
1516 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1517 current_prefix_length = pn->pos + pn->bits
1518 - chopped_off;
1521 * Either we do the actual chop off here, or, if we have
1522 * chopped off all bits in this tnode, we walk up to our parent.
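/* Editor's illustration (hypothetical values): with pn->bits = 4 and
 * cindex = 0110b, the first pass sets chopped_off = 1; the low bit of
 * cindex is already zero, so clearing it would change nothing and
 * chopped_off advances to 2.  Bit 1 is set, so the loop stops, the bit is
 * cleared and the lookup retries with cindex = 0100b.  Only when all
 * pn->bits bits have been chopped do we move up to pn's parent.
 */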
1525 if (chopped_off <= pn->bits) {
1526 cindex &= ~(1 << (chopped_off-1));
1527 } else {
1528 struct tnode *parent = node_parent((struct node *) pn);
1529 if (!parent)
1530 goto failed;
1532 /* Get Child's index */
1533 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1534 pn = parent;
1535 chopped_off = 0;
1537 #ifdef CONFIG_IP_FIB_TRIE_STATS
1538 t->stats.backtrack++;
1539 #endif
1540 goto backtrace;
1543 failed:
1544 ret = 1;
1545 found:
1546 rcu_read_unlock();
1547 return ret;
1550 /* only called from updater side */
1551 static int trie_leaf_remove(struct trie *t, t_key key)
1553 t_key cindex;
1554 struct tnode *tp = NULL;
1555 struct node *n = t->trie;
1556 struct leaf *l;
1558 pr_debug("entering trie_leaf_remove(%p)\n", n);
1560 /* Note that in the case of skipped bits, those bits are *not* checked!
1561 * When we finish this, we will have NULL or a T_LEAF, and the
1562 * T_LEAF may or may not match our key.
1565 while (n != NULL && IS_TNODE(n)) {
1566 struct tnode *tn = (struct tnode *) n;
1567 check_tnode(tn);
1568 n = tnode_get_child(tn, tkey_extract_bits(key,
1569 tn->pos, tn->bits));
1571 BUG_ON(n && node_parent(n) != tn);
1573 l = (struct leaf *) n;
1575 if (!n || !tkey_equals(l->key, key))
1576 return 0;
1579 * Key found.
1580 * Remove the leaf and rebalance the tree
1583 t->size--;
1585 tp = node_parent(n);
1586 tnode_free((struct tnode *) n);
1588 if (tp) {
1589 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1590 put_child(t, (struct tnode *)tp, cindex, NULL);
1591 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1592 } else
1593 rcu_assign_pointer(t->trie, NULL);
1595 return 1;
1599 * Caller must hold RTNL.
1601 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1603 struct trie *t = (struct trie *) tb->tb_data;
1604 u32 key, mask;
1605 int plen = cfg->fc_dst_len;
1606 u8 tos = cfg->fc_tos;
1607 struct fib_alias *fa, *fa_to_delete;
1608 struct list_head *fa_head;
1609 struct leaf *l;
1610 struct leaf_info *li;
1612 if (plen > 32)
1613 return -EINVAL;
1615 key = ntohl(cfg->fc_dst);
1616 mask = ntohl(inet_make_mask(plen));
1618 if (key & ~mask)
1619 return -EINVAL;
1621 key = key & mask;
1622 l = fib_find_node(t, key);
1624 if (!l)
1625 return -ESRCH;
1627 fa_head = get_fa_head(l, plen);
1628 fa = fib_find_alias(fa_head, tos, 0);
1630 if (!fa)
1631 return -ESRCH;
1633 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1635 fa_to_delete = NULL;
1636 fa_head = fa->fa_list.prev;
1638 list_for_each_entry(fa, fa_head, fa_list) {
1639 struct fib_info *fi = fa->fa_info;
1641 if (fa->fa_tos != tos)
1642 break;
1644 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1645 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1646 fa->fa_scope == cfg->fc_scope) &&
1647 (!cfg->fc_protocol ||
1648 fi->fib_protocol == cfg->fc_protocol) &&
1649 fib_nh_match(cfg, fi) == 0) {
1650 fa_to_delete = fa;
1651 break;
1655 if (!fa_to_delete)
1656 return -ESRCH;
1658 fa = fa_to_delete;
1659 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1660 &cfg->fc_nlinfo, 0);
1662 l = fib_find_node(t, key);
1663 li = find_leaf_info(l, plen);
1665 list_del_rcu(&fa->fa_list);
1667 if (list_empty(fa_head)) {
1668 hlist_del_rcu(&li->hlist);
1669 free_leaf_info(li);
1672 if (hlist_empty(&l->list))
1673 trie_leaf_remove(t, key);
1675 if (fa->fa_state & FA_S_ACCESSED)
1676 rt_cache_flush(-1);
1678 fib_release_info(fa->fa_info);
1679 alias_free_mem_rcu(fa);
1680 return 0;
1683 static int trie_flush_list(struct trie *t, struct list_head *head)
1685 struct fib_alias *fa, *fa_node;
1686 int found = 0;
1688 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1689 struct fib_info *fi = fa->fa_info;
1691 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1692 list_del_rcu(&fa->fa_list);
1693 fib_release_info(fa->fa_info);
1694 alias_free_mem_rcu(fa);
1695 found++;
1698 return found;
1701 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1703 int found = 0;
1704 struct hlist_head *lih = &l->list;
1705 struct hlist_node *node, *tmp;
1706 struct leaf_info *li = NULL;
1708 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1709 found += trie_flush_list(t, &li->falh);
1711 if (list_empty(&li->falh)) {
1712 hlist_del_rcu(&li->hlist);
1713 free_leaf_info(li);
1716 return found;
1719 /* rcu_read_lock needs to be held by the caller on the read side */
1721 static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1723 struct node *c = (struct node *) thisleaf;
1724 struct tnode *p;
1725 int idx;
1726 struct node *trie = rcu_dereference(t->trie);
1728 if (c == NULL) {
1729 if (trie == NULL)
1730 return NULL;
1732 if (IS_LEAF(trie)) /* trie w. just a leaf */
1733 return (struct leaf *) trie;
1735 p = (struct tnode *)trie; /* Start */
1736 } else
1737 p = node_parent_rcu(c);
1739 while (p) {
1740 int pos, last;
1742 /* Find the next child of the parent */
1743 if (c)
1744 pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
1745 else
1746 pos = 0;
1748 last = 1 << p->bits;
1749 for (idx = pos; idx < last ; idx++) {
1750 c = rcu_dereference(p->child[idx]);
1752 if (!c)
1753 continue;
1755 /* Descend if tnode */
1756 while (IS_TNODE(c)) {
1757 p = (struct tnode *) c;
1758 idx = 0;
1760 /* Leftmost non-NULL branch */
1761 if (p && IS_TNODE(p))
1762 while (!(c = rcu_dereference(p->child[idx]))
1763 && idx < (1<<p->bits)) idx++;
1765 /* Done with this tnode? */
1766 if (idx >= (1 << p->bits) || !c)
1767 goto up;
1769 return (struct leaf *) c;
1772 /* No more children, go up one step */
1773 c = (struct node *) p;
1774 p = node_parent_rcu(c);
1776 return NULL; /* Done - we have walked up past the root of the trie */
1780 * Caller must hold RTNL.
1782 static int fn_trie_flush(struct fib_table *tb)
1784 struct trie *t = (struct trie *) tb->tb_data;
1785 struct leaf *ll = NULL, *l = NULL;
1786 int found = 0, h;
1788 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1789 found += trie_flush_leaf(t, l);
1791 if (ll && hlist_empty(&ll->list))
1792 trie_leaf_remove(t, ll->key);
1793 ll = l;
1796 if (ll && hlist_empty(&ll->list))
1797 trie_leaf_remove(t, ll->key);
1799 pr_debug("trie_flush found=%d\n", found);
1800 return found;
1803 static void fn_trie_select_default(struct fib_table *tb,
1804 const struct flowi *flp,
1805 struct fib_result *res)
1807 struct trie *t = (struct trie *) tb->tb_data;
1808 int order, last_idx;
1809 struct fib_info *fi = NULL;
1810 struct fib_info *last_resort;
1811 struct fib_alias *fa = NULL;
1812 struct list_head *fa_head;
1813 struct leaf *l;
1815 last_idx = -1;
1816 last_resort = NULL;
1817 order = -1;
1819 rcu_read_lock();
1821 l = fib_find_node(t, 0);
1822 if (!l)
1823 goto out;
1825 fa_head = get_fa_head(l, 0);
1826 if (!fa_head)
1827 goto out;
1829 if (list_empty(fa_head))
1830 goto out;
1832 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1833 struct fib_info *next_fi = fa->fa_info;
1835 if (fa->fa_scope != res->scope ||
1836 fa->fa_type != RTN_UNICAST)
1837 continue;
1839 if (next_fi->fib_priority > res->fi->fib_priority)
1840 break;
1841 if (!next_fi->fib_nh[0].nh_gw ||
1842 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1843 continue;
1844 fa->fa_state |= FA_S_ACCESSED;
1846 if (fi == NULL) {
1847 if (next_fi != res->fi)
1848 break;
1849 } else if (!fib_detect_death(fi, order, &last_resort,
1850 &last_idx, tb->tb_default)) {
1851 fib_result_assign(res, fi);
1852 tb->tb_default = order;
1853 goto out;
1855 fi = next_fi;
1856 order++;
1858 if (order <= 0 || fi == NULL) {
1859 tb->tb_default = -1;
1860 goto out;
1863 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1864 tb->tb_default)) {
1865 fib_result_assign(res, fi);
1866 tb->tb_default = order;
1867 goto out;
1869 if (last_idx >= 0)
1870 fib_result_assign(res, last_resort);
1871 tb->tb_default = last_idx;
1872 out:
1873 rcu_read_unlock();
1876 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1877 struct fib_table *tb,
1878 struct sk_buff *skb, struct netlink_callback *cb)
1880 int i, s_i;
1881 struct fib_alias *fa;
1883 __be32 xkey = htonl(key);
1885 s_i = cb->args[4];
1886 i = 0;
1888 /* rcu_read_lock is held by the caller */
1890 list_for_each_entry_rcu(fa, fah, fa_list) {
1891 if (i < s_i) {
1892 i++;
1893 continue;
1895 BUG_ON(!fa->fa_info);
1897 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1898 cb->nlh->nlmsg_seq,
1899 RTM_NEWROUTE,
1900 tb->tb_id,
1901 fa->fa_type,
1902 fa->fa_scope,
1903 xkey,
1904 plen,
1905 fa->fa_tos,
1906 fa->fa_info, 0) < 0) {
1907 cb->args[4] = i;
1908 return -1;
1910 i++;
1912 cb->args[4] = i;
1913 return skb->len;
1916 static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb,
1917 struct sk_buff *skb, struct netlink_callback *cb)
1919 int h, s_h;
1920 struct list_head *fa_head;
1921 struct leaf *l = NULL;
1923 s_h = cb->args[3];
1925 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1926 if (h < s_h)
1927 continue;
1928 if (h > s_h)
1929 memset(&cb->args[4], 0,
1930 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1932 fa_head = get_fa_head(l, plen);
1934 if (!fa_head)
1935 continue;
1937 if (list_empty(fa_head))
1938 continue;
1940 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb) < 0) {
1941 cb->args[3] = h;
1942 return -1;
1945 cb->args[3] = h;
1946 return skb->len;
1949 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb,
1950 struct netlink_callback *cb)
1952 int m, s_m;
1953 struct trie *t = (struct trie *) tb->tb_data;
1955 s_m = cb->args[2];
1957 rcu_read_lock();
1958 for (m = 0; m <= 32; m++) {
1959 if (m < s_m)
1960 continue;
1961 if (m > s_m)
1962 memset(&cb->args[3], 0,
1963 sizeof(cb->args) - 3*sizeof(cb->args[0]));
1965 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb) < 0) {
1966 cb->args[2] = m;
1967 goto out;
1970 rcu_read_unlock();
1971 cb->args[2] = m;
1972 return skb->len;
1973 out:
1974 rcu_read_unlock();
1975 return -1;
1978 void __init fib_hash_init(void)
1980 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1981 sizeof(struct fib_alias),
1982 0, SLAB_PANIC, NULL);
1984 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
1985 max(sizeof(struct leaf),
1986 sizeof(struct leaf_info)),
1987 0, SLAB_PANIC, NULL);
1991 /* Fix more generic FIB names for init later */
1992 struct fib_table *fib_hash_table(u32 id)
1994 struct fib_table *tb;
1995 struct trie *t;
1997 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1998 GFP_KERNEL);
1999 if (tb == NULL)
2000 return NULL;
2002 tb->tb_id = id;
2003 tb->tb_default = -1;
2004 tb->tb_lookup = fn_trie_lookup;
2005 tb->tb_insert = fn_trie_insert;
2006 tb->tb_delete = fn_trie_delete;
2007 tb->tb_flush = fn_trie_flush;
2008 tb->tb_select_default = fn_trie_select_default;
2009 tb->tb_dump = fn_trie_dump;
2011 t = (struct trie *) tb->tb_data;
2012 memset(t, 0, sizeof(*t));
2014 if (id == RT_TABLE_LOCAL)
2015 pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION);
2017 return tb;
2020 #ifdef CONFIG_PROC_FS
2021 /* Depth first Trie walk iterator */
2022 struct fib_trie_iter {
2023 struct seq_net_private p;
2024 struct trie *trie_local, *trie_main;
2025 struct tnode *tnode;
2026 struct trie *trie;
2027 unsigned index;
2028 unsigned depth;
2031 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
2033 struct tnode *tn = iter->tnode;
2034 unsigned cindex = iter->index;
2035 struct tnode *p;
2037 /* A single entry routing table */
2038 if (!tn)
2039 return NULL;
2041 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2042 iter->tnode, iter->index, iter->depth);
2043 rescan:
2044 while (cindex < (1<<tn->bits)) {
2045 struct node *n = tnode_get_child_rcu(tn, cindex);
2047 if (n) {
2048 if (IS_LEAF(n)) {
2049 iter->tnode = tn;
2050 iter->index = cindex + 1;
2051 } else {
2052 /* push down one level */
2053 iter->tnode = (struct tnode *) n;
2054 iter->index = 0;
2055 ++iter->depth;
2057 return n;
2060 ++cindex;
2063 /* Current node exhausted, pop back up */
2064 p = node_parent_rcu((struct node *)tn);
2065 if (p) {
2066 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2067 tn = p;
2068 --iter->depth;
2069 goto rescan;
2072 /* got root? */
2073 return NULL;
2076 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2077 struct trie *t)
2079 struct node *n ;
2081 if (!t)
2082 return NULL;
2084 n = rcu_dereference(t->trie);
2086 if (!iter)
2087 return NULL;
2089 if (n) {
2090 if (IS_TNODE(n)) {
2091 iter->tnode = (struct tnode *) n;
2092 iter->trie = t;
2093 iter->index = 0;
2094 iter->depth = 1;
2095 } else {
2096 iter->tnode = NULL;
2097 iter->trie = t;
2098 iter->index = 0;
2099 iter->depth = 0;
2101 return n;
2103 return NULL;
2106 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2108 struct node *n;
2109 struct fib_trie_iter iter;
2111 memset(s, 0, sizeof(*s));
2113 rcu_read_lock();
2114 for (n = fib_trie_get_first(&iter, t); n;
2115 n = fib_trie_get_next(&iter)) {
2116 if (IS_LEAF(n)) {
2117 s->leaves++;
2118 s->totdepth += iter.depth;
2119 if (iter.depth > s->maxdepth)
2120 s->maxdepth = iter.depth;
2121 } else {
2122 const struct tnode *tn = (const struct tnode *) n;
2123 int i;
2125 s->tnodes++;
2126 if (tn->bits < MAX_STAT_DEPTH)
2127 s->nodesizes[tn->bits]++;
2129 for (i = 0; i < (1<<tn->bits); i++)
2130 if (!tn->child[i])
2131 s->nullpointers++;
2134 rcu_read_unlock();
2138 * This outputs /proc/net/fib_triestat
2140 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2142 unsigned i, max, pointers, bytes, avdepth;
2144 if (stat->leaves)
2145 avdepth = stat->totdepth*100 / stat->leaves;
2146 else
2147 avdepth = 0;
2149 seq_printf(seq, "\tAver depth: %u.%02d\n",
2150 avdepth / 100, avdepth % 100);
2151 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2153 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2155 bytes = sizeof(struct leaf) * stat->leaves;
2156 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
2157 bytes += sizeof(struct tnode) * stat->tnodes;
2159 max = MAX_STAT_DEPTH;
2160 while (max > 0 && stat->nodesizes[max-1] == 0)
2161 max--;
2163 pointers = 0;
2164 for (i = 1; i <= max; i++)
2165 if (stat->nodesizes[i] != 0) {
2166 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
2167 pointers += (1<<i) * stat->nodesizes[i];
2169 seq_putc(seq, '\n');
2170 seq_printf(seq, "\tPointers: %u\n", pointers);
2172 bytes += sizeof(struct node *) * pointers;
2173 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
2174 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
2177 #ifdef CONFIG_IP_FIB_TRIE_STATS
2178 static void trie_show_usage(struct seq_file *seq,
2179 const struct trie_use_stats *stats)
2181 seq_printf(seq, "\nCounters:\n---------\n");
2182 seq_printf(seq, "gets = %u\n", stats->gets);
2183 seq_printf(seq, "backtracks = %u\n", stats->backtrack);
2184 seq_printf(seq, "semantic match passed = %u\n",
2185 stats->semantic_match_passed);
2186 seq_printf(seq, "semantic match miss = %u\n",
2187 stats->semantic_match_miss);
2188 seq_printf(seq, "null node hit= %u\n", stats->null_node_hit);
2189 seq_printf(seq, "skipped node resize = %u\n\n",
2190 stats->resize_node_skipped);
2192 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2194 static void fib_trie_show(struct seq_file *seq, const char *name,
2195 struct trie *trie)
2197 struct trie_stat stat;
2199 seq_printf(seq, "%s: %d\n", name, trie->size);
2200 trie_collect_stats(trie, &stat);
2201 trie_show_stats(seq, &stat);
2202 #ifdef CONFIG_IP_FIB_TRIE_STATS
2203 trie_show_usage(seq, &trie->stats);
2204 #endif
2207 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2209 struct net *net = (struct net *)seq->private;
2210 struct fib_table *tb;
2212 seq_printf(seq,
2213 "Basic info: size of leaf:"
2214 " %Zd bytes, size of tnode: %Zd bytes.\n",
2215 sizeof(struct leaf), sizeof(struct tnode));
2217 tb = fib_get_table(net, RT_TABLE_LOCAL);
2218 if (tb)
2219 fib_trie_show(seq, "Local", (struct trie *) tb->tb_data);
2221 tb = fib_get_table(net, RT_TABLE_MAIN);
2222 if (tb)
2223 fib_trie_show(seq, "Main", (struct trie *) tb->tb_data);
2225 return 0;
2228 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2230 int err;
2231 struct net *net;
2233 net = get_proc_net(inode);
2234 if (net == NULL)
2235 return -ENXIO;
2236 err = single_open(file, fib_triestat_seq_show, net);
2237 if (err < 0) {
2238 put_net(net);
2239 return err;
2241 return 0;
2244 static int fib_triestat_seq_release(struct inode *ino, struct file *f)
2246 struct seq_file *seq = f->private_data;
2247 put_net(seq->private);
2248 return single_release(ino, f);
2251 static const struct file_operations fib_triestat_fops = {
2252 .owner = THIS_MODULE,
2253 .open = fib_triestat_seq_open,
2254 .read = seq_read,
2255 .llseek = seq_lseek,
2256 .release = fib_triestat_seq_release,
2259 static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
2260 loff_t pos)
2262 loff_t idx = 0;
2263 struct node *n;
2265 for (n = fib_trie_get_first(iter, iter->trie_local);
2266 n; ++idx, n = fib_trie_get_next(iter)) {
2267 if (pos == idx)
2268 return n;
2271 for (n = fib_trie_get_first(iter, iter->trie_main);
2272 n; ++idx, n = fib_trie_get_next(iter)) {
2273 if (pos == idx)
2274 return n;
2276 return NULL;
2279 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2280 __acquires(RCU)
2282 struct fib_trie_iter *iter = seq->private;
2283 struct fib_table *tb;
2285 if (!iter->trie_local) {
2286 tb = fib_get_table(iter->p.net, RT_TABLE_LOCAL);
2287 if (tb)
2288 iter->trie_local = (struct trie *) tb->tb_data;
2290 if (!iter->trie_main) {
2291 tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
2292 if (tb)
2293 iter->trie_main = (struct trie *) tb->tb_data;
2295 rcu_read_lock();
2296 if (*pos == 0)
2297 return SEQ_START_TOKEN;
2298 return fib_trie_get_idx(iter, *pos - 1);
2301 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2303 struct fib_trie_iter *iter = seq->private;
2304 void *l = v;
2306 ++*pos;
2307 if (v == SEQ_START_TOKEN)
2308 return fib_trie_get_idx(iter, 0);
2310 v = fib_trie_get_next(iter);
2311 BUG_ON(v == l);
2312 if (v)
2313 return v;
2315 /* continue scan in next trie */
2316 if (iter->trie == iter->trie_local)
2317 return fib_trie_get_first(iter, iter->trie_main);
2319 return NULL;
2322 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2323 __releases(RCU)
2325 rcu_read_unlock();
2328 static void seq_indent(struct seq_file *seq, int n)
2330 while (n-- > 0) seq_puts(seq, " ");
2333 static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
2335 switch (s) {
2336 case RT_SCOPE_UNIVERSE: return "universe";
2337 case RT_SCOPE_SITE: return "site";
2338 case RT_SCOPE_LINK: return "link";
2339 case RT_SCOPE_HOST: return "host";
2340 case RT_SCOPE_NOWHERE: return "nowhere";
2341 default:
2342 snprintf(buf, len, "scope=%d", s);
2343 return buf;
2347 static const char *rtn_type_names[__RTN_MAX] = {
2348 [RTN_UNSPEC] = "UNSPEC",
2349 [RTN_UNICAST] = "UNICAST",
2350 [RTN_LOCAL] = "LOCAL",
2351 [RTN_BROADCAST] = "BROADCAST",
2352 [RTN_ANYCAST] = "ANYCAST",
2353 [RTN_MULTICAST] = "MULTICAST",
2354 [RTN_BLACKHOLE] = "BLACKHOLE",
2355 [RTN_UNREACHABLE] = "UNREACHABLE",
2356 [RTN_PROHIBIT] = "PROHIBIT",
2357 [RTN_THROW] = "THROW",
2358 [RTN_NAT] = "NAT",
2359 [RTN_XRESOLVE] = "XRESOLVE",
2362 static inline const char *rtn_type(char *buf, size_t len, unsigned t)
2364 if (t < __RTN_MAX && rtn_type_names[t])
2365 return rtn_type_names[t];
2366 snprintf(buf, len, "type %u", t);
2367 return buf;
2370 /* Pretty print the trie */
2371 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2373 const struct fib_trie_iter *iter = seq->private;
2374 struct node *n = v;
2376 if (v == SEQ_START_TOKEN)
2377 return 0;
2379 if (!node_parent_rcu(n)) {
2380 if (iter->trie == iter->trie_local)
2381 seq_puts(seq, "<local>:\n");
2382 else
2383 seq_puts(seq, "<main>:\n");
2386 if (IS_TNODE(n)) {
2387 struct tnode *tn = (struct tnode *) n;
2388 __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
2390 seq_indent(seq, iter->depth-1);
2391 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2392 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2393 tn->empty_children);
2395 } else {
2396 struct leaf *l = (struct leaf *) n;
2397 int i;
2398 __be32 val = htonl(l->key);
2400 seq_indent(seq, iter->depth);
2401 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
2402 for (i = 32; i >= 0; i--) {
2403 struct leaf_info *li = find_leaf_info(l, i);
2405 if (li) {
2406 struct fib_alias *fa;
2408 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2409 char buf1[32], buf2[32];
2411 seq_indent(seq, iter->depth+1);
2412 seq_printf(seq, " /%d %s %s", i,
2413 rtn_scope(buf1, sizeof(buf1),
2414 fa->fa_scope),
2415 rtn_type(buf2, sizeof(buf2),
2416 fa->fa_type));
2417 if (fa->fa_tos)
2418 seq_printf(seq, "tos =%d\n",
2419 fa->fa_tos);
2420 seq_putc(seq, '\n');
2426 return 0;
2429 static const struct seq_operations fib_trie_seq_ops = {
2430 .start = fib_trie_seq_start,
2431 .next = fib_trie_seq_next,
2432 .stop = fib_trie_seq_stop,
2433 .show = fib_trie_seq_show,
2436 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2438 return seq_open_net(inode, file, &fib_trie_seq_ops,
2439 sizeof(struct fib_trie_iter));
2442 static const struct file_operations fib_trie_fops = {
2443 .owner = THIS_MODULE,
2444 .open = fib_trie_seq_open,
2445 .read = seq_read,
2446 .llseek = seq_lseek,
2447 .release = seq_release_net,
2450 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2452 static unsigned type2flags[RTN_MAX + 1] = {
2453 [7] = RTF_REJECT, [8] = RTF_REJECT,
2455 unsigned flags = type2flags[type];
2457 if (fi && fi->fib_nh->nh_gw)
2458 flags |= RTF_GATEWAY;
2459 if (mask == htonl(0xFFFFFFFF))
2460 flags |= RTF_HOST;
2461 flags |= RTF_UP;
2462 return flags;
2466 * This outputs /proc/net/route.
2467 * The format of the file is not supposed to be changed
2468 * and needs to be the same as fib_hash output to avoid breaking
2469 * legacy utilities
2471 static int fib_route_seq_show(struct seq_file *seq, void *v)
2473 const struct fib_trie_iter *iter = seq->private;
2474 struct leaf *l = v;
2475 int i;
2476 char bf[128];
2478 if (v == SEQ_START_TOKEN) {
2479 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2480 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2481 "\tWindow\tIRTT");
2482 return 0;
2485 if (iter->trie == iter->trie_local)
2486 return 0;
2488 if (IS_TNODE(l))
2489 return 0;
2491 for (i = 32; i >= 0; i--) {
2492 struct leaf_info *li = find_leaf_info(l, i);
2493 struct fib_alias *fa;
2494 __be32 mask, prefix;
2496 if (!li)
2497 continue;
2499 mask = inet_make_mask(li->plen);
2500 prefix = htonl(l->key);
2502 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2503 const struct fib_info *fi = fa->fa_info;
2504 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2506 if (fa->fa_type == RTN_BROADCAST
2507 || fa->fa_type == RTN_MULTICAST)
2508 continue;
2510 if (fi)
2511 snprintf(bf, sizeof(bf),
2512 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2513 fi->fib_dev ? fi->fib_dev->name : "*",
2514 prefix,
2515 fi->fib_nh->nh_gw, flags, 0, 0,
2516 fi->fib_priority,
2517 mask,
2518 (fi->fib_advmss ?
2519 fi->fib_advmss + 40 : 0),
2520 fi->fib_window,
2521 fi->fib_rtt >> 3);
2522 else
2523 snprintf(bf, sizeof(bf),
2524 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2525 prefix, 0, flags, 0, 0, 0,
2526 mask, 0, 0, 0);
2528 seq_printf(seq, "%-127s\n", bf);
2532 return 0;
2535 static const struct seq_operations fib_route_seq_ops = {
2536 .start = fib_trie_seq_start,
2537 .next = fib_trie_seq_next,
2538 .stop = fib_trie_seq_stop,
2539 .show = fib_route_seq_show,
2542 static int fib_route_seq_open(struct inode *inode, struct file *file)
2544 return seq_open_net(inode, file, &fib_route_seq_ops,
2545 sizeof(struct fib_trie_iter));
2548 static const struct file_operations fib_route_fops = {
2549 .owner = THIS_MODULE,
2550 .open = fib_route_seq_open,
2551 .read = seq_read,
2552 .llseek = seq_lseek,
2553 .release = seq_release_net,
2556 int __net_init fib_proc_init(struct net *net)
2558 if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
2559 goto out1;
2561 if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
2562 &fib_triestat_fops))
2563 goto out2;
2565 if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
2566 goto out3;
2568 return 0;
2570 out3:
2571 proc_net_remove(net, "fib_triestat");
2572 out2:
2573 proc_net_remove(net, "fib_trie");
2574 out1:
2575 return -ENOMEM;
2578 void __net_exit fib_proc_exit(struct net *net)
2580 proc_net_remove(net, "fib_trie");
2581 proc_net_remove(net, "fib_triestat");
2582 proc_net_remove(net, "route");
2585 #endif /* CONFIG_PROC_FS */