net/ipv4/fib_trie.c (from linux-2.6/verdex.git)
1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
28 * Code from fib_hash has been reused which includes the following header:
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
35 * IPv4 FIB: lookup engine and maintenance routines.
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
45 * Substantial contributions to this work comes from:
47 * David S. Miller, <davem@davemloft.net>
48 * Stephen Hemminger <shemminger@osdl.org>
49 * Paul E. McKenney <paulmck@us.ibm.com>
50 * Patrick McHardy <kaber@trash.net>
53 #define VERSION "0.408"
55 #include <asm/uaccess.h>
56 #include <asm/system.h>
57 #include <asm/bitops.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
60 #include <linux/mm.h>
61 #include <linux/string.h>
62 #include <linux/socket.h>
63 #include <linux/sockios.h>
64 #include <linux/errno.h>
65 #include <linux/in.h>
66 #include <linux/inet.h>
67 #include <linux/inetdevice.h>
68 #include <linux/netdevice.h>
69 #include <linux/if_arp.h>
70 #include <linux/proc_fs.h>
71 #include <linux/rcupdate.h>
72 #include <linux/skbuff.h>
73 #include <linux/netlink.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <net/ip.h>
77 #include <net/protocol.h>
78 #include <net/route.h>
79 #include <net/tcp.h>
80 #include <net/sock.h>
81 #include <net/ip_fib.h>
82 #include "fib_lookup.h"
84 #undef CONFIG_IP_FIB_TRIE_STATS
85 #define MAX_STAT_DEPTH 32
87 #define KEYLENGTH (8*sizeof(t_key))
89 typedef unsigned int t_key;
91 #define T_TNODE 0
92 #define T_LEAF 1
93 #define NODE_TYPE_MASK 0x1UL
94 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
96 #define IS_TNODE(n) (!(n->parent & T_LEAF))
97 #define IS_LEAF(n) (n->parent & T_LEAF)
99 struct node {
100 t_key key;
101 unsigned long parent;
104 struct leaf {
105 t_key key;
106 unsigned long parent;
107 struct hlist_head list;
108 struct rcu_head rcu;
111 struct leaf_info {
112 struct hlist_node hlist;
113 struct rcu_head rcu;
114 int plen;
115 struct list_head falh;
118 struct tnode {
119 t_key key;
120 unsigned long parent;
121 unsigned short pos:5; /* 2log(KEYLENGTH) bits needed */
122 unsigned short bits:5; /* 2log(KEYLENGTH) bits needed */
123 unsigned short full_children; /* KEYLENGTH bits needed */
124 unsigned short empty_children; /* KEYLENGTH bits needed */
125 struct rcu_head rcu;
126 struct node *child[0];
129 #ifdef CONFIG_IP_FIB_TRIE_STATS
130 struct trie_use_stats {
131 unsigned int gets;
132 unsigned int backtrack;
133 unsigned int semantic_match_passed;
134 unsigned int semantic_match_miss;
135 unsigned int null_node_hit;
136 unsigned int resize_node_skipped;
138 #endif
140 struct trie_stat {
141 unsigned int totdepth;
142 unsigned int maxdepth;
143 unsigned int tnodes;
144 unsigned int leaves;
145 unsigned int nullpointers;
146 unsigned int nodesizes[MAX_STAT_DEPTH];
149 struct trie {
150 struct node *trie;
151 #ifdef CONFIG_IP_FIB_TRIE_STATS
152 struct trie_use_stats stats;
153 #endif
154 int size;
155 unsigned int revision;
158 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
159 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
160 static struct node *resize(struct trie *t, struct tnode *tn);
161 static struct tnode *inflate(struct trie *t, struct tnode *tn);
162 static struct tnode *halve(struct trie *t, struct tnode *tn);
163 static void tnode_free(struct tnode *tn);
165 static struct kmem_cache *fn_alias_kmem __read_mostly;
166 static struct trie *trie_local = NULL, *trie_main = NULL;
168 static inline struct tnode *node_parent(struct node *node)
170 struct tnode *ret;
172 ret = (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
173 return rcu_dereference(ret);
176 static inline void node_set_parent(struct node *node, struct tnode *ptr)
178 rcu_assign_pointer(node->parent,
179 (unsigned long)ptr | NODE_TYPE(node));
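/* Illustration, not part of the original file: the low bit of ->parent
 * doubles as the node type tag (T_TNODE/T_LEAF) and the remaining bits are
 * the parent pointer. Allocations are at least word aligned, so bit 0 of a
 * real pointer is always free. A minimal sketch of the encoding, using a
 * hypothetical parent_tnode pointer:
 *
 *	unsigned long p = (unsigned long)parent_tnode | T_LEAF;   /* tag as leaf */
 *	struct tnode *parent = (struct tnode *)(p & ~NODE_TYPE_MASK);
 *	int is_leaf = p & NODE_TYPE_MASK;                          /* nonzero here */
 */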
182 /* rcu_read_lock needs to be held by the caller on the read side */
184 static inline struct node *tnode_get_child(struct tnode *tn, int i)
186 BUG_ON(i >= 1 << tn->bits);
188 return rcu_dereference(tn->child[i]);
191 static inline int tnode_child_length(const struct tnode *tn)
193 return 1 << tn->bits;
196 static inline t_key mask_pfx(t_key k, unsigned short l)
198 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
201 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
203 if (offset < KEYLENGTH)
204 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
205 else
206 return 0;
209 static inline int tkey_equals(t_key a, t_key b)
211 return a == b;
214 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
216 if (bits == 0 || offset >= KEYLENGTH)
217 return 1;
218 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
219 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
222 static inline int tkey_mismatch(t_key a, int offset, t_key b)
224 t_key diff = a ^ b;
225 int i = offset;
227 if (!diff)
228 return 0;
229 while ((diff << i) >> (KEYLENGTH-1) == 0)
230 i++;
231 return i;
235 To understand this stuff, an understanding of keys and all their bits is
236 necessary. Every node in the trie has a key associated with it, but not
237 all of the bits in that key are significant.
239 Consider a node 'n' and its parent 'tp'.
241 If n is a leaf, every bit in its key is significant. Its presence is
242 necessitated by path compression, since during a tree traversal (when
243 searching for a leaf - unless we are doing an insertion) we will completely
244 ignore all skipped bits we encounter. Thus we need to verify, at the end of
245 a potentially successful search, that we have indeed been walking the
246 correct key path.
248 Note that if the correct key is present in the tree, we can never "miss" it
249 by following the wrong path. Path compression ensures that segments of the key
250 that are the same for all keys with a given prefix are skipped, but the
251 skipped part *is* identical for each node in the subtrie below the skipped
252 bit! fib_insert_node() in this implementation takes care of that - note the
253 call to tkey_sub_equals() in fib_insert_node().
255 If n is an internal node - a 'tnode' here - the various parts of its key
256 have many different meanings.
258 Example:
259 _________________________________________________________________
260 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
261 -----------------------------------------------------------------
262 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
264 _________________________________________________________________
265 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
266 -----------------------------------------------------------------
267 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
269 tp->pos = 7
270 tp->bits = 3
271 n->pos = 15
272 n->bits = 4
274 First, let's just ignore the bits that come before the parent tp, that is
275 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
276 not use them for anything.
278 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
279 index into the parent's child array. That is, they will be used to find
280 'n' among tp's children.
282 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
283 for the node n.
285 All the bits we have seen so far are significant to the node n. The rest
286 of the bits are really not needed or indeed known in n->key.
288 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
289 n's child array, and will of course be different for each child.
292 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
293 at this point.
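/* Illustration, not part of the original file: a minimal user-space sketch
 * of how tkey_extract_bits() picks out the "N" index bits of the example
 * above, assuming a 32-bit t_key (KEYLENGTH == 32).
 */
#include <assert.h>

typedef unsigned int example_key;
#define EXAMPLE_KEYLENGTH (8 * sizeof(example_key))

/* same logic as tkey_extract_bits() above */
static example_key example_extract(example_key a, int offset, int bits)
{
	if (offset < EXAMPLE_KEYLENGTH)
		return ((example_key)(a << offset)) >> (EXAMPLE_KEYLENGTH - bits);
	return 0;
}

int main(void)
{
	example_key key = 0xC0A80100;	/* 192.168.1.0 in host byte order */

	/* with tp->pos == 7 and tp->bits == 3 the child index is bits 7..9 */
	assert(example_extract(key, 7, 3) == 2);
	return 0;
}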
297 static inline void check_tnode(const struct tnode *tn)
299 WARN_ON(tn && tn->pos+tn->bits > 32);
302 static int halve_threshold = 25;
303 static int inflate_threshold = 50;
304 static int halve_threshold_root = 8;
305 static int inflate_threshold_root = 15;
308 static void __alias_free_mem(struct rcu_head *head)
310 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
311 kmem_cache_free(fn_alias_kmem, fa);
314 static inline void alias_free_mem_rcu(struct fib_alias *fa)
316 call_rcu(&fa->rcu, __alias_free_mem);
319 static void __leaf_free_rcu(struct rcu_head *head)
321 kfree(container_of(head, struct leaf, rcu));
324 static void __leaf_info_free_rcu(struct rcu_head *head)
326 kfree(container_of(head, struct leaf_info, rcu));
329 static inline void free_leaf_info(struct leaf_info *leaf)
331 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
334 static struct tnode *tnode_alloc(unsigned int size)
336 struct page *pages;
338 if (size <= PAGE_SIZE)
339 return kcalloc(size, 1, GFP_KERNEL);
341 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
342 if (!pages)
343 return NULL;
345 return page_address(pages);
348 static void __tnode_free_rcu(struct rcu_head *head)
350 struct tnode *tn = container_of(head, struct tnode, rcu);
351 unsigned int size = sizeof(struct tnode) +
352 (1 << tn->bits) * sizeof(struct node *);
354 if (size <= PAGE_SIZE)
355 kfree(tn);
356 else
357 free_pages((unsigned long)tn, get_order(size));
360 static inline void tnode_free(struct tnode *tn)
362 if (IS_LEAF(tn)) {
363 struct leaf *l = (struct leaf *) tn;
364 call_rcu_bh(&l->rcu, __leaf_free_rcu);
365 } else
366 call_rcu(&tn->rcu, __tnode_free_rcu);
369 static struct leaf *leaf_new(void)
371 struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
372 if (l) {
373 l->parent = T_LEAF;
374 INIT_HLIST_HEAD(&l->list);
376 return l;
379 static struct leaf_info *leaf_info_new(int plen)
381 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
382 if (li) {
383 li->plen = plen;
384 INIT_LIST_HEAD(&li->falh);
386 return li;
389 static struct tnode* tnode_new(t_key key, int pos, int bits)
391 int nchildren = 1<<bits;
392 int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *);
393 struct tnode *tn = tnode_alloc(sz);
395 if (tn) {
396 memset(tn, 0, sz);
397 tn->parent = T_TNODE;
398 tn->pos = pos;
399 tn->bits = bits;
400 tn->key = key;
401 tn->full_children = 0;
402 tn->empty_children = 1<<bits;
405 pr_debug("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode),
406 (unsigned int) (sizeof(struct node) * 1<<bits));
407 return tn;
411 * Check whether a tnode 'n' is "full", i.e. it is an internal node
412 * and no bits are skipped. See discussion in dyntree paper p. 6
415 static inline int tnode_full(const struct tnode *tn, const struct node *n)
417 if (n == NULL || IS_LEAF(n))
418 return 0;
420 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
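/* Illustration, not from the original file: with a parent tnode at pos == 7,
 * bits == 3, a child tnode whose pos == 10 (== 7 + 3) skips no bits and is
 * "full"; a child tnode with pos == 12 skips bits 10..11 and is not.
 * Leaves and empty slots never count as full.
 */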
423 static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n)
425 tnode_put_child_reorg(tn, i, n, -1);
429 * Add a child at position i overwriting the old value.
430 * Update the value of full_children and empty_children.
433 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
435 struct node *chi = tn->child[i];
436 int isfull;
438 BUG_ON(i >= 1<<tn->bits);
441 /* update emptyChildren */
442 if (n == NULL && chi != NULL)
443 tn->empty_children++;
444 else if (n != NULL && chi == NULL)
445 tn->empty_children--;
447 /* update fullChildren */
448 if (wasfull == -1)
449 wasfull = tnode_full(tn, chi);
451 isfull = tnode_full(tn, n);
452 if (wasfull && !isfull)
453 tn->full_children--;
454 else if (!wasfull && isfull)
455 tn->full_children++;
457 if (n)
458 node_set_parent(n, tn);
460 rcu_assign_pointer(tn->child[i], n);
463 static struct node *resize(struct trie *t, struct tnode *tn)
465 int i;
466 int err = 0;
467 struct tnode *old_tn;
468 int inflate_threshold_use;
469 int halve_threshold_use;
470 int max_resize;
472 if (!tn)
473 return NULL;
475 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
476 tn, inflate_threshold, halve_threshold);
478 /* No children */
479 if (tn->empty_children == tnode_child_length(tn)) {
480 tnode_free(tn);
481 return NULL;
483 /* One child */
484 if (tn->empty_children == tnode_child_length(tn) - 1)
485 for (i = 0; i < tnode_child_length(tn); i++) {
486 struct node *n;
488 n = tn->child[i];
489 if (!n)
490 continue;
492 /* compress one level */
493 node_set_parent(n, NULL);
494 tnode_free(tn);
495 return n;
498 * Double as long as the resulting node has a proportion of
499 * non-empty children that is above the threshold.
503 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
504 * the Helsinki University of Technology and Matti Tikkanen of Nokia
505 * Telecommunications, page 6:
506 * "A node is doubled if the ratio of non-empty children to all
507 * children in the *doubled* node is at least 'high'."
509 * 'high' in this instance is the variable 'inflate_threshold'. It
510 * is expressed as a percentage, so we multiply it with
511 * tnode_child_length() and instead of multiplying by 2 (since the
512 * child array will be doubled by inflate()) and multiplying
513 * the left-hand side by 100 (to handle the percentage thing) we
514 * multiply the left-hand side by 50.
516 * The left-hand side may look a bit weird: tnode_child_length(tn)
517 * - tn->empty_children is of course the number of non-null children
518 * in the current node. tn->full_children is the number of "full"
519 * children, that is non-null tnodes with a skip value of 0.
520 * All of those will be doubled in the resulting inflated tnode, so
521 * we just count them one extra time here.
523 * A clearer way to write this would be:
525 * to_be_doubled = tn->full_children;
526 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
527 * tn->full_children;
529 * new_child_length = tnode_child_length(tn) * 2;
531 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
532 * new_child_length;
533 * if (new_fill_factor >= inflate_threshold)
535 * ...and so on, though it would mess up the while () loop.
537 * anyway,
538 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
539 * inflate_threshold
541 * avoid a division:
542 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
543 * inflate_threshold * new_child_length
545 * expand not_to_be_doubled and to_be_doubled, and shorten:
546 * 100 * (tnode_child_length(tn) - tn->empty_children +
547 * tn->full_children) >= inflate_threshold * new_child_length
549 * expand new_child_length:
550 * 100 * (tnode_child_length(tn) - tn->empty_children +
551 * tn->full_children) >=
552 * inflate_threshold * tnode_child_length(tn) * 2
554 * shorten again:
555 * 50 * (tn->full_children + tnode_child_length(tn) -
556 * tn->empty_children) >= inflate_threshold *
557 * tnode_child_length(tn)
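/* Worked example, not from the original file (the numbers are made up):
 * for a tnode with tnode_child_length() == 16, empty_children == 4,
 * full_children == 6 and inflate_threshold == 50:
 *
 *	50 * (6 + 16 - 4) = 900  >=  50 * 16 = 800	=> inflate
 *
 * which agrees with the "clearer" form above: to_be_doubled = 6,
 * not_to_be_doubled = 16 - 4 - 6 = 6, new_child_length = 32, and
 * new_fill_factor = 100 * (6 + 2*6) / 32 = 56 >= 50.
 */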
561 check_tnode(tn);
563 /* Keep root node larger */
565 if (!tn->parent)
566 inflate_threshold_use = inflate_threshold_root;
567 else
568 inflate_threshold_use = inflate_threshold;
570 err = 0;
571 max_resize = 10;
572 while ((tn->full_children > 0 && max_resize-- &&
573 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
574 inflate_threshold_use * tnode_child_length(tn))) {
576 old_tn = tn;
577 tn = inflate(t, tn);
578 if (IS_ERR(tn)) {
579 tn = old_tn;
580 #ifdef CONFIG_IP_FIB_TRIE_STATS
581 t->stats.resize_node_skipped++;
582 #endif
583 break;
587 if (max_resize < 0) {
588 if (!tn->parent)
589 printk(KERN_WARNING "Fix inflate_threshold_root. Now=%d size=%d bits\n",
590 inflate_threshold_root, tn->bits);
591 else
592 printk(KERN_WARNING "Fix inflate_threshold. Now=%d size=%d bits\n",
593 inflate_threshold, tn->bits);
596 check_tnode(tn);
599 * Halve as long as the number of empty children in this
600 * node is above threshold.
604 /* Keep root node larger */
606 if (!tn->parent)
607 halve_threshold_use = halve_threshold_root;
608 else
609 halve_threshold_use = halve_threshold;
611 err = 0;
612 max_resize = 10;
613 while (tn->bits > 1 && max_resize-- &&
614 100 * (tnode_child_length(tn) - tn->empty_children) <
615 halve_threshold_use * tnode_child_length(tn)) {
617 old_tn = tn;
618 tn = halve(t, tn);
619 if (IS_ERR(tn)) {
620 tn = old_tn;
621 #ifdef CONFIG_IP_FIB_TRIE_STATS
622 t->stats.resize_node_skipped++;
623 #endif
624 break;
628 if (max_resize < 0) {
629 if (!tn->parent)
630 printk(KERN_WARNING "Fix halve_threshold_root. Now=%d size=%d bits\n",
631 halve_threshold_root, tn->bits);
632 else
633 printk(KERN_WARNING "Fix halve_threshold. Now=%d size=%d bits\n",
634 halve_threshold, tn->bits);
637 /* Only one child remains */
638 if (tn->empty_children == tnode_child_length(tn) - 1)
639 for (i = 0; i < tnode_child_length(tn); i++) {
640 struct node *n;
642 n = tn->child[i];
643 if (!n)
644 continue;
646 /* compress one level */
648 node_set_parent(n, NULL);
649 tnode_free(tn);
650 return n;
653 return (struct node *) tn;
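/* Summary, not from the original file: resize() first disposes of the
 * empty and single-child cases, then repeatedly inflate()s while the fill
 * factor stays above inflate_threshold (inflate_threshold_root for the
 * root), then repeatedly halve()s while it is below halve_threshold, and
 * finally compresses away a node that ends up with a single child.
 */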
656 static struct tnode *inflate(struct trie *t, struct tnode *tn)
658 struct tnode *inode;
659 struct tnode *oldtnode = tn;
660 int olen = tnode_child_length(tn);
661 int i;
663 pr_debug("In inflate\n");
665 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
667 if (!tn)
668 return ERR_PTR(-ENOMEM);
671 * Preallocate and store tnodes before the actual work so we
672 * don't get into an inconsistent state if memory allocation
673 * fails. In case of failure we return the old node and the
674 * inflate of the tnode is ignored.
677 for (i = 0; i < olen; i++) {
678 struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);
680 if (inode &&
681 IS_TNODE(inode) &&
682 inode->pos == oldtnode->pos + oldtnode->bits &&
683 inode->bits > 1) {
684 struct tnode *left, *right;
685 t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
687 left = tnode_new(inode->key&(~m), inode->pos + 1,
688 inode->bits - 1);
689 if (!left)
690 goto nomem;
692 right = tnode_new(inode->key|m, inode->pos + 1,
693 inode->bits - 1);
695 if (!right) {
696 tnode_free(left);
697 goto nomem;
700 put_child(t, tn, 2*i, (struct node *) left);
701 put_child(t, tn, 2*i+1, (struct node *) right);
705 for (i = 0; i < olen; i++) {
706 struct node *node = tnode_get_child(oldtnode, i);
707 struct tnode *left, *right;
708 int size, j;
710 /* An empty child */
711 if (node == NULL)
712 continue;
714 /* A leaf or an internal node with skipped bits */
716 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
717 tn->pos + tn->bits - 1) {
718 if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits,
719 1) == 0)
720 put_child(t, tn, 2*i, node);
721 else
722 put_child(t, tn, 2*i+1, node);
723 continue;
726 /* An internal node with two children */
727 inode = (struct tnode *) node;
729 if (inode->bits == 1) {
730 put_child(t, tn, 2*i, inode->child[0]);
731 put_child(t, tn, 2*i+1, inode->child[1]);
733 tnode_free(inode);
734 continue;
737 /* An internal node with more than two children */
739 /* We will replace this node 'inode' with two new
740 * ones, 'left' and 'right', each with half of the
741 * original children. The two new nodes will have
742 * a position one bit further down the key and this
743 * means that the "significant" part of their keys
744 * (see the discussion near the top of this file)
745 * will differ by one bit, which will be "0" in
746 * left's key and "1" in right's key. Since we are
747 * moving the key position by one step, the bit that
748 * we are moving away from - the bit at position
749 * (inode->pos) - is the one that will differ between
750 * left and right. So... we synthesize that bit in the
751 * two new keys.
752 * The mask 'm' below will be a single "one" bit at
753 * the position (inode->pos)
756 /* Use the old key, but set the new significant
757 * bit to zero.
760 left = (struct tnode *) tnode_get_child(tn, 2*i);
761 put_child(t, tn, 2*i, NULL);
763 BUG_ON(!left);
765 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
766 put_child(t, tn, 2*i+1, NULL);
768 BUG_ON(!right);
770 size = tnode_child_length(left);
771 for (j = 0; j < size; j++) {
772 put_child(t, left, j, inode->child[j]);
773 put_child(t, right, j, inode->child[j + size]);
775 put_child(t, tn, 2*i, resize(t, left));
776 put_child(t, tn, 2*i+1, resize(t, right));
778 tnode_free(inode);
780 tnode_free(oldtnode);
781 return tn;
782 nomem:
784 int size = tnode_child_length(tn);
785 int j;
787 for (j = 0; j < size; j++)
788 if (tn->child[j])
789 tnode_free((struct tnode *)tn->child[j]);
791 tnode_free(tn);
793 return ERR_PTR(-ENOMEM);
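/* Illustration, not from the original file: inflate() doubles the child
 * array, so old slot i maps to new slots 2*i and 2*i+1. A leaf or a
 * skipping tnode goes into one of the two slots according to the next key
 * bit; a full child with exactly two children has them hoisted directly;
 * a larger full child is split into a "left" and a "right" tnode, each
 * rooted one bit further down the key.
 */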
797 static struct tnode *halve(struct trie *t, struct tnode *tn)
799 struct tnode *oldtnode = tn;
800 struct node *left, *right;
801 int i;
802 int olen = tnode_child_length(tn);
804 pr_debug("In halve\n");
806 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
808 if (!tn)
809 return ERR_PTR(-ENOMEM);
812 * Preallocate and store tnodes before the actual work so we
813 * don't get into an inconsistent state if memory allocation
814 * fails. In case of failure we return the old node and the
815 * halve of the tnode is ignored.
818 for (i = 0; i < olen; i += 2) {
819 left = tnode_get_child(oldtnode, i);
820 right = tnode_get_child(oldtnode, i+1);
822 /* Two nonempty children */
823 if (left && right) {
824 struct tnode *newn;
826 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
828 if (!newn)
829 goto nomem;
831 put_child(t, tn, i/2, (struct node *)newn);
836 for (i = 0; i < olen; i += 2) {
837 struct tnode *newBinNode;
839 left = tnode_get_child(oldtnode, i);
840 right = tnode_get_child(oldtnode, i+1);
842 /* At least one of the children is empty */
843 if (left == NULL) {
844 if (right == NULL) /* Both are empty */
845 continue;
846 put_child(t, tn, i/2, right);
847 continue;
850 if (right == NULL) {
851 put_child(t, tn, i/2, left);
852 continue;
855 /* Two nonempty children */
856 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
857 put_child(t, tn, i/2, NULL);
858 put_child(t, newBinNode, 0, left);
859 put_child(t, newBinNode, 1, right);
860 put_child(t, tn, i/2, resize(t, newBinNode));
862 tnode_free(oldtnode);
863 return tn;
864 nomem:
866 int size = tnode_child_length(tn);
867 int j;
869 for (j = 0; j < size; j++)
870 if (tn->child[j])
871 tnode_free((struct tnode *)tn->child[j]);
873 tnode_free(tn);
875 return ERR_PTR(-ENOMEM);
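/* Illustration, not from the original file: halve() pairs up the children
 * of the old node, so old slots (0,1) go to new slot 0, (2,3) to 1, and so
 * on. If only one slot of a pair is non-NULL it is promoted directly; if
 * both are, they are re-hung under a freshly allocated one-bit tnode.
 */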
879 static void trie_init(struct trie *t)
881 if (!t)
882 return;
884 t->size = 0;
885 rcu_assign_pointer(t->trie, NULL);
886 t->revision = 0;
887 #ifdef CONFIG_IP_FIB_TRIE_STATS
888 memset(&t->stats, 0, sizeof(struct trie_use_stats));
889 #endif
892 /* Readers must hold rcu_read_lock(); currently that is only the dump
893 routines, via get_fa_head and dump */
895 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
897 struct hlist_head *head = &l->list;
898 struct hlist_node *node;
899 struct leaf_info *li;
901 hlist_for_each_entry_rcu(li, node, head, hlist)
902 if (li->plen == plen)
903 return li;
905 return NULL;
908 static inline struct list_head * get_fa_head(struct leaf *l, int plen)
910 struct leaf_info *li = find_leaf_info(l, plen);
912 if (!li)
913 return NULL;
915 return &li->falh;
918 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
920 struct leaf_info *li = NULL, *last = NULL;
921 struct hlist_node *node;
923 if (hlist_empty(head)) {
924 hlist_add_head_rcu(&new->hlist, head);
925 } else {
926 hlist_for_each_entry(li, node, head, hlist) {
927 if (new->plen > li->plen)
928 break;
930 last = li;
932 if (last)
933 hlist_add_after_rcu(&last->hlist, &new->hlist);
934 else
935 hlist_add_before_rcu(&new->hlist, &li->hlist);
939 /* rcu_read_lock needs to be held by the caller on the read side */
941 static struct leaf *
942 fib_find_node(struct trie *t, u32 key)
944 int pos;
945 struct tnode *tn;
946 struct node *n;
948 pos = 0;
949 n = rcu_dereference(t->trie);
951 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
952 tn = (struct tnode *) n;
954 check_tnode(tn);
956 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
957 pos = tn->pos + tn->bits;
958 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
959 } else
960 break;
962 /* Case we have found a leaf. Compare prefixes */
964 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
965 return (struct leaf *)n;
967 return NULL;
970 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
972 int wasfull;
973 t_key cindex, key = tn->key;
974 struct tnode *tp;
976 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
977 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
978 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
979 tn = (struct tnode *) resize (t, (struct tnode *)tn);
980 tnode_put_child_reorg((struct tnode *)tp, cindex,(struct node*)tn, wasfull);
982 tp = node_parent((struct node *) tn);
983 if (!tp)
984 break;
985 tn = tp;
988 /* Handle last (top) tnode */
989 if (IS_TNODE(tn))
990 tn = (struct tnode*) resize(t, (struct tnode *)tn);
992 return (struct node*) tn;
995 /* only used from updater-side */
997 static struct list_head *
998 fib_insert_node(struct trie *t, int *err, u32 key, int plen)
1000 int pos, newpos;
1001 struct tnode *tp = NULL, *tn = NULL;
1002 struct node *n;
1003 struct leaf *l;
1004 int missbit;
1005 struct list_head *fa_head = NULL;
1006 struct leaf_info *li;
1007 t_key cindex;
1009 pos = 0;
1010 n = t->trie;
1012 /* If we point to NULL, stop. Either the tree is empty and we should
1013 * just put a new leaf in it, or we have reached an empty child slot,
1014 * and we should just put our new leaf in that.
1015 * If we point to a T_TNODE, check if it matches our key. Note that
1016 * a T_TNODE might be skipping any number of bits - its 'pos' need
1017 * not be the parent's 'pos'+'bits'!
1019 * If it does match the current key, get pos/bits from it, extract
1020 * the index from our key, push the T_TNODE and walk the tree.
1022 * If it doesn't, we have to replace it with a new T_TNODE.
1024 * If we point to a T_LEAF, it might or might not have the same key
1025 * as we do. If it does, just change the value, update the T_LEAF's
1026 * value, and return it.
1027 * If it doesn't, we need to replace it with a T_TNODE.
1030 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1031 tn = (struct tnode *) n;
1033 check_tnode(tn);
1035 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1036 tp = tn;
1037 pos = tn->pos + tn->bits;
1038 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1040 BUG_ON(n && node_parent(n) != tn);
1041 } else
1042 break;
1046 * n ----> NULL, LEAF or TNODE
1048 * tp is n's (parent) ----> NULL or TNODE
1051 BUG_ON(tp && IS_LEAF(tp));
1053 /* Case 1: n is a leaf. Compare prefixes */
1055 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1056 struct leaf *l = (struct leaf *) n;
1058 li = leaf_info_new(plen);
1060 if (!li) {
1061 *err = -ENOMEM;
1062 goto err;
1065 fa_head = &li->falh;
1066 insert_leaf_info(&l->list, li);
1067 goto done;
1069 t->size++;
1070 l = leaf_new();
1072 if (!l) {
1073 *err = -ENOMEM;
1074 goto err;
1077 l->key = key;
1078 li = leaf_info_new(plen);
1080 if (!li) {
1081 tnode_free((struct tnode *) l);
1082 *err = -ENOMEM;
1083 goto err;
1086 fa_head = &li->falh;
1087 insert_leaf_info(&l->list, li);
1089 if (t->trie && n == NULL) {
1090 /* Case 2: n is NULL, and we will just insert a new leaf */
1092 node_set_parent((struct node *)l, tp);
1094 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1095 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1096 } else {
1097 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1099 * Add a new tnode here
1100 * the first tnode needs some special handling
1103 if (tp)
1104 pos = tp->pos+tp->bits;
1105 else
1106 pos = 0;
1108 if (n) {
1109 newpos = tkey_mismatch(key, pos, n->key);
1110 tn = tnode_new(n->key, newpos, 1);
1111 } else {
1112 newpos = 0;
1113 tn = tnode_new(key, newpos, 1); /* First tnode */
1116 if (!tn) {
1117 free_leaf_info(li);
1118 tnode_free((struct tnode *) l);
1119 *err = -ENOMEM;
1120 goto err;
1123 node_set_parent((struct node *)tn, tp);
1125 missbit = tkey_extract_bits(key, newpos, 1);
1126 put_child(t, tn, missbit, (struct node *)l);
1127 put_child(t, tn, 1-missbit, n);
1129 if (tp) {
1130 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1131 put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
1132 } else {
1133 rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
1134 tp = tn;
1138 if (tp && tp->pos + tp->bits > 32)
1139 printk(KERN_WARNING "fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1140 tp, tp->pos, tp->bits, key, plen);
1142 /* Rebalance the trie */
1144 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1145 done:
1146 t->revision++;
1147 err:
1148 return fa_head;
1152 * Caller must hold RTNL.
1154 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1156 struct trie *t = (struct trie *) tb->tb_data;
1157 struct fib_alias *fa, *new_fa;
1158 struct list_head *fa_head = NULL;
1159 struct fib_info *fi;
1160 int plen = cfg->fc_dst_len;
1161 u8 tos = cfg->fc_tos;
1162 u32 key, mask;
1163 int err;
1164 struct leaf *l;
1166 if (plen > 32)
1167 return -EINVAL;
1169 key = ntohl(cfg->fc_dst);
1171 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1173 mask = ntohl(inet_make_mask(plen));
1175 if (key & ~mask)
1176 return -EINVAL;
1178 key = key & mask;
1180 fi = fib_create_info(cfg);
1181 if (IS_ERR(fi)) {
1182 err = PTR_ERR(fi);
1183 goto err;
1186 l = fib_find_node(t, key);
1187 fa = NULL;
1189 if (l) {
1190 fa_head = get_fa_head(l, plen);
1191 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1194 /* Now fa, if non-NULL, points to the first fib alias
1195 * with the same keys [prefix,tos,priority], if such a key already
1196 * exists, or to the node before which we will insert the new one.
1198 * If fa is NULL, we will need to allocate a new one and
1199 * insert to the head of f.
1201 * If f is NULL, no fib node matched the destination key
1202 * and we need to allocate a new one of those as well.
1205 if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
1206 struct fib_alias *fa_orig;
1208 err = -EEXIST;
1209 if (cfg->fc_nlflags & NLM_F_EXCL)
1210 goto out;
1212 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1213 struct fib_info *fi_drop;
1214 u8 state;
1216 err = -ENOBUFS;
1217 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1218 if (new_fa == NULL)
1219 goto out;
1221 fi_drop = fa->fa_info;
1222 new_fa->fa_tos = fa->fa_tos;
1223 new_fa->fa_info = fi;
1224 new_fa->fa_type = cfg->fc_type;
1225 new_fa->fa_scope = cfg->fc_scope;
1226 state = fa->fa_state;
1227 new_fa->fa_state &= ~FA_S_ACCESSED;
1229 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1230 alias_free_mem_rcu(fa);
1232 fib_release_info(fi_drop);
1233 if (state & FA_S_ACCESSED)
1234 rt_cache_flush(-1);
1235 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1236 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1238 goto succeeded;
1240 /* Error if we find a perfect match which
1241 * uses the same scope, type, and nexthop
1242 * information.
1244 fa_orig = fa;
1245 list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
1246 if (fa->fa_tos != tos)
1247 break;
1248 if (fa->fa_info->fib_priority != fi->fib_priority)
1249 break;
1250 if (fa->fa_type == cfg->fc_type &&
1251 fa->fa_scope == cfg->fc_scope &&
1252 fa->fa_info == fi) {
1253 goto out;
1256 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1257 fa = fa_orig;
1259 err = -ENOENT;
1260 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1261 goto out;
1263 err = -ENOBUFS;
1264 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1265 if (new_fa == NULL)
1266 goto out;
1268 new_fa->fa_info = fi;
1269 new_fa->fa_tos = tos;
1270 new_fa->fa_type = cfg->fc_type;
1271 new_fa->fa_scope = cfg->fc_scope;
1272 new_fa->fa_state = 0;
1274 * Insert new entry to the list.
1277 if (!fa_head) {
1278 err = 0;
1279 fa_head = fib_insert_node(t, &err, key, plen);
1280 if (err)
1281 goto out_free_new_fa;
1284 list_add_tail_rcu(&new_fa->fa_list,
1285 (fa ? &fa->fa_list : fa_head));
1287 rt_cache_flush(-1);
1288 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1289 &cfg->fc_nlinfo, 0);
1290 succeeded:
1291 return 0;
1293 out_free_new_fa:
1294 kmem_cache_free(fn_alias_kmem, new_fa);
1295 out:
1296 fib_release_info(fi);
1297 err:
1298 return err;
1302 /* should be called with rcu_read_lock */
1303 static inline int check_leaf(struct trie *t, struct leaf *l,
1304 t_key key, int *plen, const struct flowi *flp,
1305 struct fib_result *res)
1307 int err, i;
1308 __be32 mask;
1309 struct leaf_info *li;
1310 struct hlist_head *hhead = &l->list;
1311 struct hlist_node *node;
1313 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1314 i = li->plen;
1315 mask = inet_make_mask(i);
1316 if (l->key != (key & ntohl(mask)))
1317 continue;
1319 if ((err = fib_semantic_match(&li->falh, flp, res, htonl(l->key), mask, i)) <= 0) {
1320 *plen = i;
1321 #ifdef CONFIG_IP_FIB_TRIE_STATS
1322 t->stats.semantic_match_passed++;
1323 #endif
1324 return err;
1326 #ifdef CONFIG_IP_FIB_TRIE_STATS
1327 t->stats.semantic_match_miss++;
1328 #endif
1330 return 1;
1333 static int
1334 fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1336 struct trie *t = (struct trie *) tb->tb_data;
1337 int plen, ret = 0;
1338 struct node *n;
1339 struct tnode *pn;
1340 int pos, bits;
1341 t_key key = ntohl(flp->fl4_dst);
1342 int chopped_off;
1343 t_key cindex = 0;
1344 int current_prefix_length = KEYLENGTH;
1345 struct tnode *cn;
1346 t_key node_prefix, key_prefix, pref_mismatch;
1347 int mp;
1349 rcu_read_lock();
1351 n = rcu_dereference(t->trie);
1352 if (!n)
1353 goto failed;
1355 #ifdef CONFIG_IP_FIB_TRIE_STATS
1356 t->stats.gets++;
1357 #endif
1359 /* Just a leaf? */
1360 if (IS_LEAF(n)) {
1361 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1362 goto found;
1363 goto failed;
1365 pn = (struct tnode *) n;
1366 chopped_off = 0;
1368 while (pn) {
1369 pos = pn->pos;
1370 bits = pn->bits;
1372 if (!chopped_off)
1373 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1374 pos, bits);
1376 n = tnode_get_child(pn, cindex);
1378 if (n == NULL) {
1379 #ifdef CONFIG_IP_FIB_TRIE_STATS
1380 t->stats.null_node_hit++;
1381 #endif
1382 goto backtrace;
1385 if (IS_LEAF(n)) {
1386 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1387 goto found;
1388 else
1389 goto backtrace;
1392 #define HL_OPTIMIZE
1393 #ifdef HL_OPTIMIZE
1394 cn = (struct tnode *)n;
1397 * It's a tnode, and we can do some extra checks here if we
1398 * like, to avoid descending into a dead-end branch.
1399 * This tnode is in the parent's child array at index
1400 * key[p_pos..p_pos+p_bits] but potentially with some bits
1401 * chopped off, so in reality the index may be just a
1402 * subprefix, padded with zero at the end.
1403 * We can also take a look at any skipped bits in this
1404 * tnode - everything up to p_pos is supposed to be ok,
1405 * and the non-chopped bits of the index (see previous
1406 * paragraph) are also guaranteed ok, but the rest is
1407 * considered unknown.
1409 * The skipped bits are key[pos+bits..cn->pos].
1412 /* If current_prefix_length < pos+bits, we are already doing
1413 * actual prefix matching, which means everything from
1414 * pos+(bits-chopped_off) onward must be zero along some
1415 * branch of this subtree - otherwise there is *no* valid
1416 * prefix present. Here we can only check the skipped
1417 * bits. Remember, since we have already indexed into the
1418 * parent's child array, we know that the bits we chopped off
1419 * *are* zero.
1422 /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */
1424 if (current_prefix_length < pos+bits) {
1425 if (tkey_extract_bits(cn->key, current_prefix_length,
1426 cn->pos - current_prefix_length) != 0 ||
1427 !(cn->child[0]))
1428 goto backtrace;
1432 * If chopped_off=0, the index is fully validated and we
1433 * only need to look at the skipped bits for this, the new,
1434 * tnode. What we actually want to do is to find out if
1435 * these skipped bits match our key perfectly, or if we will
1436 * have to count on finding a matching prefix further down,
1437 * because if we do, we would like to have some way of
1438 * verifying the existence of such a prefix at this point.
1441 /* The only thing we can do at this point is to verify that
1442 * any such matching prefix can indeed be a prefix to our
1443 * key, and if the bits in the node we are inspecting that
1444 * do not match our key are not ZERO, this cannot be true.
1445 * Thus, find out where there is a mismatch (before cn->pos)
1446 * and verify that all the mismatching bits are zero in the
1447 * new tnode's key.
1450 /* Note: We aren't very concerned about the piece of the key
1451 * that precedes pn->pos+pn->bits, since it has already been
1452 * checked. The bits after cn->pos aren't checked since these are
1453 * by definition "unknown" at this point. Thus, what we want to
1454 * see is if we are about to enter the "prefix matching" state,
1455 * and in that case verify that the skipped bits that will prevail
1456 * throughout this subtree are zero, as they have to be if we are
1457 * to find a matching prefix.
1460 node_prefix = mask_pfx(cn->key, cn->pos);
1461 key_prefix = mask_pfx(key, cn->pos);
1462 pref_mismatch = key_prefix^node_prefix;
1463 mp = 0;
1465 /* In short: If skipped bits in this node do not match the search
1466 * key, enter the "prefix matching" state directly.
1468 if (pref_mismatch) {
1469 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1470 mp++;
1471 pref_mismatch = pref_mismatch <<1;
1473 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1475 if (key_prefix != 0)
1476 goto backtrace;
1478 if (current_prefix_length >= cn->pos)
1479 current_prefix_length = mp;
1481 #endif
1482 pn = (struct tnode *)n; /* Descend */
1483 chopped_off = 0;
1484 continue;
1486 backtrace:
1487 chopped_off++;
1489 /* As zero bits don't change the child key (cindex), skip over them */
1490 while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
1491 chopped_off++;
1493 /* Decrease current_... with bits chopped off */
1494 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1495 current_prefix_length = pn->pos + pn->bits - chopped_off;
1498 * Either we do the actual chop off here, or, if we have
1499 * chopped off all bits in this tnode, walk up to our parent.
1502 if (chopped_off <= pn->bits) {
1503 cindex &= ~(1 << (chopped_off-1));
1504 } else {
1505 struct tnode *parent = node_parent((struct node *) pn);
1506 if (!parent)
1507 goto failed;
1509 /* Get Child's index */
1510 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1511 pn = parent;
1512 chopped_off = 0;
1514 #ifdef CONFIG_IP_FIB_TRIE_STATS
1515 t->stats.backtrack++;
1516 #endif
1517 goto backtrace;
1520 failed:
1521 ret = 1;
1522 found:
1523 rcu_read_unlock();
1524 return ret;
1527 /* only called from updater side */
1528 static int trie_leaf_remove(struct trie *t, t_key key)
1530 t_key cindex;
1531 struct tnode *tp = NULL;
1532 struct node *n = t->trie;
1533 struct leaf *l;
1535 pr_debug("entering trie_leaf_remove(%p)\n", n);
1537 /* Note that in the case of skipped bits, those bits are *not* checked!
1538 * When we finish this, we will have NULL or a T_LEAF, and the
1539 * T_LEAF may or may not match our key.
1542 while (n != NULL && IS_TNODE(n)) {
1543 struct tnode *tn = (struct tnode *) n;
1544 check_tnode(tn);
1545 n = tnode_get_child(tn ,tkey_extract_bits(key, tn->pos, tn->bits));
1547 BUG_ON(n && node_parent(n) != tn);
1549 l = (struct leaf *) n;
1551 if (!n || !tkey_equals(l->key, key))
1552 return 0;
1555 * Key found.
1556 * Remove the leaf and rebalance the tree
1559 t->revision++;
1560 t->size--;
1562 tp = node_parent(n);
1563 tnode_free((struct tnode *) n);
1565 if (tp) {
1566 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1567 put_child(t, (struct tnode *)tp, cindex, NULL);
1568 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1569 } else
1570 rcu_assign_pointer(t->trie, NULL);
1572 return 1;
1576 * Caller must hold RTNL.
1578 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1580 struct trie *t = (struct trie *) tb->tb_data;
1581 u32 key, mask;
1582 int plen = cfg->fc_dst_len;
1583 u8 tos = cfg->fc_tos;
1584 struct fib_alias *fa, *fa_to_delete;
1585 struct list_head *fa_head;
1586 struct leaf *l;
1587 struct leaf_info *li;
1589 if (plen > 32)
1590 return -EINVAL;
1592 key = ntohl(cfg->fc_dst);
1593 mask = ntohl(inet_make_mask(plen));
1595 if (key & ~mask)
1596 return -EINVAL;
1598 key = key & mask;
1599 l = fib_find_node(t, key);
1601 if (!l)
1602 return -ESRCH;
1604 fa_head = get_fa_head(l, plen);
1605 fa = fib_find_alias(fa_head, tos, 0);
1607 if (!fa)
1608 return -ESRCH;
1610 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1612 fa_to_delete = NULL;
1613 fa_head = fa->fa_list.prev;
1615 list_for_each_entry(fa, fa_head, fa_list) {
1616 struct fib_info *fi = fa->fa_info;
1618 if (fa->fa_tos != tos)
1619 break;
1621 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1622 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1623 fa->fa_scope == cfg->fc_scope) &&
1624 (!cfg->fc_protocol ||
1625 fi->fib_protocol == cfg->fc_protocol) &&
1626 fib_nh_match(cfg, fi) == 0) {
1627 fa_to_delete = fa;
1628 break;
1632 if (!fa_to_delete)
1633 return -ESRCH;
1635 fa = fa_to_delete;
1636 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1637 &cfg->fc_nlinfo, 0);
1639 l = fib_find_node(t, key);
1640 li = find_leaf_info(l, plen);
1642 list_del_rcu(&fa->fa_list);
1644 if (list_empty(fa_head)) {
1645 hlist_del_rcu(&li->hlist);
1646 free_leaf_info(li);
1649 if (hlist_empty(&l->list))
1650 trie_leaf_remove(t, key);
1652 if (fa->fa_state & FA_S_ACCESSED)
1653 rt_cache_flush(-1);
1655 fib_release_info(fa->fa_info);
1656 alias_free_mem_rcu(fa);
1657 return 0;
1660 static int trie_flush_list(struct trie *t, struct list_head *head)
1662 struct fib_alias *fa, *fa_node;
1663 int found = 0;
1665 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1666 struct fib_info *fi = fa->fa_info;
1668 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1669 list_del_rcu(&fa->fa_list);
1670 fib_release_info(fa->fa_info);
1671 alias_free_mem_rcu(fa);
1672 found++;
1675 return found;
1678 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1680 int found = 0;
1681 struct hlist_head *lih = &l->list;
1682 struct hlist_node *node, *tmp;
1683 struct leaf_info *li = NULL;
1685 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1686 found += trie_flush_list(t, &li->falh);
1688 if (list_empty(&li->falh)) {
1689 hlist_del_rcu(&li->hlist);
1690 free_leaf_info(li);
1693 return found;
1696 /* rcu_read_lock needs to be held by the caller on the read side */
1698 static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1700 struct node *c = (struct node *) thisleaf;
1701 struct tnode *p;
1702 int idx;
1703 struct node *trie = rcu_dereference(t->trie);
1705 if (c == NULL) {
1706 if (trie == NULL)
1707 return NULL;
1709 if (IS_LEAF(trie)) /* trie w. just a leaf */
1710 return (struct leaf *) trie;
1712 p = (struct tnode*) trie; /* Start */
1713 } else
1714 p = node_parent(c);
1716 while (p) {
1717 int pos, last;
1719 /* Find the next child of the parent */
1720 if (c)
1721 pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
1722 else
1723 pos = 0;
1725 last = 1 << p->bits;
1726 for (idx = pos; idx < last ; idx++) {
1727 c = rcu_dereference(p->child[idx]);
1729 if (!c)
1730 continue;
1732 /* Descend if tnode */
1733 while (IS_TNODE(c)) {
1734 p = (struct tnode *) c;
1735 idx = 0;
1737 /* Rightmost non-NULL branch */
1738 if (p && IS_TNODE(p))
1739 while (!(c = rcu_dereference(p->child[idx]))
1740 && idx < (1<<p->bits)) idx++;
1742 /* Done with this tnode? */
1743 if (idx >= (1 << p->bits) || !c)
1744 goto up;
1746 return (struct leaf *) c;
1749 /* No more children, go up one step */
1750 c = (struct node *) p;
1751 p = node_parent(c);
1753 return NULL; /* Ready. Root of trie */
1757 * Caller must hold RTNL.
1759 static int fn_trie_flush(struct fib_table *tb)
1761 struct trie *t = (struct trie *) tb->tb_data;
1762 struct leaf *ll = NULL, *l = NULL;
1763 int found = 0, h;
1765 t->revision++;
1767 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1768 found += trie_flush_leaf(t, l);
1770 if (ll && hlist_empty(&ll->list))
1771 trie_leaf_remove(t, ll->key);
1772 ll = l;
1775 if (ll && hlist_empty(&ll->list))
1776 trie_leaf_remove(t, ll->key);
1778 pr_debug("trie_flush found=%d\n", found);
1779 return found;
1782 static int trie_last_dflt = -1;
1784 static void
1785 fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1787 struct trie *t = (struct trie *) tb->tb_data;
1788 int order, last_idx;
1789 struct fib_info *fi = NULL;
1790 struct fib_info *last_resort;
1791 struct fib_alias *fa = NULL;
1792 struct list_head *fa_head;
1793 struct leaf *l;
1795 last_idx = -1;
1796 last_resort = NULL;
1797 order = -1;
1799 rcu_read_lock();
1801 l = fib_find_node(t, 0);
1802 if (!l)
1803 goto out;
1805 fa_head = get_fa_head(l, 0);
1806 if (!fa_head)
1807 goto out;
1809 if (list_empty(fa_head))
1810 goto out;
1812 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1813 struct fib_info *next_fi = fa->fa_info;
1815 if (fa->fa_scope != res->scope ||
1816 fa->fa_type != RTN_UNICAST)
1817 continue;
1819 if (next_fi->fib_priority > res->fi->fib_priority)
1820 break;
1821 if (!next_fi->fib_nh[0].nh_gw ||
1822 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1823 continue;
1824 fa->fa_state |= FA_S_ACCESSED;
1826 if (fi == NULL) {
1827 if (next_fi != res->fi)
1828 break;
1829 } else if (!fib_detect_death(fi, order, &last_resort,
1830 &last_idx, &trie_last_dflt)) {
1831 if (res->fi)
1832 fib_info_put(res->fi);
1833 res->fi = fi;
1834 atomic_inc(&fi->fib_clntref);
1835 trie_last_dflt = order;
1836 goto out;
1838 fi = next_fi;
1839 order++;
1841 if (order <= 0 || fi == NULL) {
1842 trie_last_dflt = -1;
1843 goto out;
1846 if (!fib_detect_death(fi, order, &last_resort, &last_idx, &trie_last_dflt)) {
1847 if (res->fi)
1848 fib_info_put(res->fi);
1849 res->fi = fi;
1850 atomic_inc(&fi->fib_clntref);
1851 trie_last_dflt = order;
1852 goto out;
1854 if (last_idx >= 0) {
1855 if (res->fi)
1856 fib_info_put(res->fi);
1857 res->fi = last_resort;
1858 if (last_resort)
1859 atomic_inc(&last_resort->fib_clntref);
1861 trie_last_dflt = last_idx;
1862 out:;
1863 rcu_read_unlock();
1866 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
1867 struct sk_buff *skb, struct netlink_callback *cb)
1869 int i, s_i;
1870 struct fib_alias *fa;
1872 __be32 xkey = htonl(key);
1874 s_i = cb->args[4];
1875 i = 0;
1877 /* rcu_read_lock is held by caller */
1879 list_for_each_entry_rcu(fa, fah, fa_list) {
1880 if (i < s_i) {
1881 i++;
1882 continue;
1884 BUG_ON(!fa->fa_info);
1886 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1887 cb->nlh->nlmsg_seq,
1888 RTM_NEWROUTE,
1889 tb->tb_id,
1890 fa->fa_type,
1891 fa->fa_scope,
1892 xkey,
1893 plen,
1894 fa->fa_tos,
1895 fa->fa_info, 0) < 0) {
1896 cb->args[4] = i;
1897 return -1;
1899 i++;
1901 cb->args[4] = i;
1902 return skb->len;
1905 static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb,
1906 struct netlink_callback *cb)
1908 int h, s_h;
1909 struct list_head *fa_head;
1910 struct leaf *l = NULL;
1912 s_h = cb->args[3];
1914 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1915 if (h < s_h)
1916 continue;
1917 if (h > s_h)
1918 memset(&cb->args[4], 0,
1919 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1921 fa_head = get_fa_head(l, plen);
1923 if (!fa_head)
1924 continue;
1926 if (list_empty(fa_head))
1927 continue;
1929 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
1930 cb->args[3] = h;
1931 return -1;
1934 cb->args[3] = h;
1935 return skb->len;
1938 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
1940 int m, s_m;
1941 struct trie *t = (struct trie *) tb->tb_data;
1943 s_m = cb->args[2];
1945 rcu_read_lock();
1946 for (m = 0; m <= 32; m++) {
1947 if (m < s_m)
1948 continue;
1949 if (m > s_m)
1950 memset(&cb->args[3], 0,
1951 sizeof(cb->args) - 3*sizeof(cb->args[0]));
1953 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
1954 cb->args[2] = m;
1955 goto out;
1958 rcu_read_unlock();
1959 cb->args[2] = m;
1960 return skb->len;
1961 out:
1962 rcu_read_unlock();
1963 return -1;
1966 /* Fix more generic FIB names for init later */
1968 #ifdef CONFIG_IP_MULTIPLE_TABLES
1969 struct fib_table * fib_hash_init(u32 id)
1970 #else
1971 struct fib_table * __init fib_hash_init(u32 id)
1972 #endif
1974 struct fib_table *tb;
1975 struct trie *t;
1977 if (fn_alias_kmem == NULL)
1978 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1979 sizeof(struct fib_alias),
1980 0, SLAB_HWCACHE_ALIGN,
1981 NULL);
1983 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1984 GFP_KERNEL);
1985 if (tb == NULL)
1986 return NULL;
1988 tb->tb_id = id;
1989 tb->tb_lookup = fn_trie_lookup;
1990 tb->tb_insert = fn_trie_insert;
1991 tb->tb_delete = fn_trie_delete;
1992 tb->tb_flush = fn_trie_flush;
1993 tb->tb_select_default = fn_trie_select_default;
1994 tb->tb_dump = fn_trie_dump;
1995 memset(tb->tb_data, 0, sizeof(struct trie));
1997 t = (struct trie *) tb->tb_data;
1999 trie_init(t);
2001 if (id == RT_TABLE_LOCAL)
2002 trie_local = t;
2003 else if (id == RT_TABLE_MAIN)
2004 trie_main = t;
2006 if (id == RT_TABLE_LOCAL)
2007 printk(KERN_INFO "IPv4 FIB: Using LC-trie version %s\n", VERSION);
2009 return tb;
2012 #ifdef CONFIG_PROC_FS
2013 /* Depth first Trie walk iterator */
2014 struct fib_trie_iter {
2015 struct tnode *tnode;
2016 struct trie *trie;
2017 unsigned index;
2018 unsigned depth;
2021 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
2023 struct tnode *tn = iter->tnode;
2024 unsigned cindex = iter->index;
2025 struct tnode *p;
2027 /* A single entry routing table */
2028 if (!tn)
2029 return NULL;
2031 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2032 iter->tnode, iter->index, iter->depth);
2033 rescan:
2034 while (cindex < (1<<tn->bits)) {
2035 struct node *n = tnode_get_child(tn, cindex);
2037 if (n) {
2038 if (IS_LEAF(n)) {
2039 iter->tnode = tn;
2040 iter->index = cindex + 1;
2041 } else {
2042 /* push down one level */
2043 iter->tnode = (struct tnode *) n;
2044 iter->index = 0;
2045 ++iter->depth;
2047 return n;
2050 ++cindex;
2053 /* Current node exhausted, pop back up */
2054 p = node_parent((struct node *)tn);
2055 if (p) {
2056 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2057 tn = p;
2058 --iter->depth;
2059 goto rescan;
2062 /* got root? */
2063 return NULL;
2066 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2067 struct trie *t)
2069 struct node *n ;
2071 if (!t)
2072 return NULL;
2074 n = rcu_dereference(t->trie);
2076 if (!iter)
2077 return NULL;
2079 if (n) {
2080 if (IS_TNODE(n)) {
2081 iter->tnode = (struct tnode *) n;
2082 iter->trie = t;
2083 iter->index = 0;
2084 iter->depth = 1;
2085 } else {
2086 iter->tnode = NULL;
2087 iter->trie = t;
2088 iter->index = 0;
2089 iter->depth = 0;
2091 return n;
2093 return NULL;
2096 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2098 struct node *n;
2099 struct fib_trie_iter iter;
2101 memset(s, 0, sizeof(*s));
2103 rcu_read_lock();
2104 for (n = fib_trie_get_first(&iter, t); n;
2105 n = fib_trie_get_next(&iter)) {
2106 if (IS_LEAF(n)) {
2107 s->leaves++;
2108 s->totdepth += iter.depth;
2109 if (iter.depth > s->maxdepth)
2110 s->maxdepth = iter.depth;
2111 } else {
2112 const struct tnode *tn = (const struct tnode *) n;
2113 int i;
2115 s->tnodes++;
2116 if (tn->bits < MAX_STAT_DEPTH)
2117 s->nodesizes[tn->bits]++;
2119 for (i = 0; i < (1<<tn->bits); i++)
2120 if (!tn->child[i])
2121 s->nullpointers++;
2124 rcu_read_unlock();
2128 * This outputs /proc/net/fib_triestat
2130 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2132 unsigned i, max, pointers, bytes, avdepth;
2134 if (stat->leaves)
2135 avdepth = stat->totdepth*100 / stat->leaves;
2136 else
2137 avdepth = 0;
2139 seq_printf(seq, "\tAver depth: %d.%02d\n", avdepth / 100, avdepth % 100 );
2140 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2142 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2144 bytes = sizeof(struct leaf) * stat->leaves;
2145 seq_printf(seq, "\tInternal nodes: %d\n\t", stat->tnodes);
2146 bytes += sizeof(struct tnode) * stat->tnodes;
2148 max = MAX_STAT_DEPTH;
2149 while (max > 0 && stat->nodesizes[max-1] == 0)
2150 max--;
2152 pointers = 0;
2153 for (i = 1; i <= max; i++)
2154 if (stat->nodesizes[i] != 0) {
2155 seq_printf(seq, " %d: %d", i, stat->nodesizes[i]);
2156 pointers += (1<<i) * stat->nodesizes[i];
2158 seq_putc(seq, '\n');
2159 seq_printf(seq, "\tPointers: %d\n", pointers);
2161 bytes += sizeof(struct node *) * pointers;
2162 seq_printf(seq, "Null ptrs: %d\n", stat->nullpointers);
2163 seq_printf(seq, "Total size: %d kB\n", (bytes + 1023) / 1024);
2165 #ifdef CONFIG_IP_FIB_TRIE_STATS
2166 seq_printf(seq, "Counters:\n---------\n");
2167 seq_printf(seq,"gets = %d\n", t->stats.gets);
2168 seq_printf(seq,"backtracks = %d\n", t->stats.backtrack);
2169 seq_printf(seq,"semantic match passed = %d\n", t->stats.semantic_match_passed);
2170 seq_printf(seq,"semantic match miss = %d\n", t->stats.semantic_match_miss);
2171 seq_printf(seq,"null node hit= %d\n", t->stats.null_node_hit);
2172 seq_printf(seq,"skipped node resize = %d\n", t->stats.resize_node_skipped);
2173 #ifdef CLEAR_STATS
2174 memset(&(t->stats), 0, sizeof(t->stats));
2175 #endif
2176 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2179 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2181 struct trie_stat *stat;
2183 stat = kmalloc(sizeof(*stat), GFP_KERNEL);
2184 if (!stat)
2185 return -ENOMEM;
2187 seq_printf(seq, "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd bytes.\n",
2188 sizeof(struct leaf), sizeof(struct tnode));
2190 if (trie_local) {
2191 seq_printf(seq, "Local:\n");
2192 trie_collect_stats(trie_local, stat);
2193 trie_show_stats(seq, stat);
2196 if (trie_main) {
2197 seq_printf(seq, "Main:\n");
2198 trie_collect_stats(trie_main, stat);
2199 trie_show_stats(seq, stat);
2201 kfree(stat);
2203 return 0;
2206 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2208 return single_open(file, fib_triestat_seq_show, NULL);
2211 static const struct file_operations fib_triestat_fops = {
2212 .owner = THIS_MODULE,
2213 .open = fib_triestat_seq_open,
2214 .read = seq_read,
2215 .llseek = seq_lseek,
2216 .release = single_release,
2219 static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
2220 loff_t pos)
2222 loff_t idx = 0;
2223 struct node *n;
2225 for (n = fib_trie_get_first(iter, trie_local);
2226 n; ++idx, n = fib_trie_get_next(iter)) {
2227 if (pos == idx)
2228 return n;
2231 for (n = fib_trie_get_first(iter, trie_main);
2232 n; ++idx, n = fib_trie_get_next(iter)) {
2233 if (pos == idx)
2234 return n;
2236 return NULL;
2239 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2241 rcu_read_lock();
2242 if (*pos == 0)
2243 return SEQ_START_TOKEN;
2244 return fib_trie_get_idx(seq->private, *pos - 1);
2247 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2249 struct fib_trie_iter *iter = seq->private;
2250 void *l = v;
2252 ++*pos;
2253 if (v == SEQ_START_TOKEN)
2254 return fib_trie_get_idx(iter, 0);
2256 v = fib_trie_get_next(iter);
2257 BUG_ON(v == l);
2258 if (v)
2259 return v;
2261 /* continue scan in next trie */
2262 if (iter->trie == trie_local)
2263 return fib_trie_get_first(iter, trie_main);
2265 return NULL;
2268 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2270 rcu_read_unlock();
2273 static void seq_indent(struct seq_file *seq, int n)
2275 while (n-- > 0) seq_puts(seq, " ");
2278 static inline const char *rtn_scope(enum rt_scope_t s)
2280 static char buf[32];
2282 switch (s) {
2283 case RT_SCOPE_UNIVERSE: return "universe";
2284 case RT_SCOPE_SITE: return "site";
2285 case RT_SCOPE_LINK: return "link";
2286 case RT_SCOPE_HOST: return "host";
2287 case RT_SCOPE_NOWHERE: return "nowhere";
2288 default:
2289 snprintf(buf, sizeof(buf), "scope=%d", s);
2290 return buf;
2294 static const char *rtn_type_names[__RTN_MAX] = {
2295 [RTN_UNSPEC] = "UNSPEC",
2296 [RTN_UNICAST] = "UNICAST",
2297 [RTN_LOCAL] = "LOCAL",
2298 [RTN_BROADCAST] = "BROADCAST",
2299 [RTN_ANYCAST] = "ANYCAST",
2300 [RTN_MULTICAST] = "MULTICAST",
2301 [RTN_BLACKHOLE] = "BLACKHOLE",
2302 [RTN_UNREACHABLE] = "UNREACHABLE",
2303 [RTN_PROHIBIT] = "PROHIBIT",
2304 [RTN_THROW] = "THROW",
2305 [RTN_NAT] = "NAT",
2306 [RTN_XRESOLVE] = "XRESOLVE",
2309 static inline const char *rtn_type(unsigned t)
2311 static char buf[32];
2313 if (t < __RTN_MAX && rtn_type_names[t])
2314 return rtn_type_names[t];
2315 snprintf(buf, sizeof(buf), "type %d", t);
2316 return buf;
2319 /* Pretty print the trie */
2320 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2322 const struct fib_trie_iter *iter = seq->private;
2323 struct node *n = v;
2325 if (v == SEQ_START_TOKEN)
2326 return 0;
2328 if (!node_parent(n)) {
2329 if (iter->trie == trie_local)
2330 seq_puts(seq, "<local>:\n");
2331 else
2332 seq_puts(seq, "<main>:\n");
2335 if (IS_TNODE(n)) {
2336 struct tnode *tn = (struct tnode *) n;
2337 __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
2339 seq_indent(seq, iter->depth-1);
2340 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2341 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2342 tn->empty_children);
2344 } else {
2345 struct leaf *l = (struct leaf *) n;
2346 int i;
2347 __be32 val = htonl(l->key);
2349 seq_indent(seq, iter->depth);
2350 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
2351 for (i = 32; i >= 0; i--) {
2352 struct leaf_info *li = find_leaf_info(l, i);
2353 if (li) {
2354 struct fib_alias *fa;
2355 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2356 seq_indent(seq, iter->depth+1);
2357 seq_printf(seq, " /%d %s %s", i,
2358 rtn_scope(fa->fa_scope),
2359 rtn_type(fa->fa_type));
2360 if (fa->fa_tos)
2361 seq_printf(seq, "tos =%d\n",
2362 fa->fa_tos);
2363 seq_putc(seq, '\n');
2369 return 0;
2372 static const struct seq_operations fib_trie_seq_ops = {
2373 .start = fib_trie_seq_start,
2374 .next = fib_trie_seq_next,
2375 .stop = fib_trie_seq_stop,
2376 .show = fib_trie_seq_show,
2379 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2381 struct seq_file *seq;
2382 int rc = -ENOMEM;
2383 struct fib_trie_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
2385 if (!s)
2386 goto out;
2388 rc = seq_open(file, &fib_trie_seq_ops);
2389 if (rc)
2390 goto out_kfree;
2392 seq = file->private_data;
2393 seq->private = s;
2394 memset(s, 0, sizeof(*s));
2395 out:
2396 return rc;
2397 out_kfree:
2398 kfree(s);
2399 goto out;
2402 static const struct file_operations fib_trie_fops = {
2403 .owner = THIS_MODULE,
2404 .open = fib_trie_seq_open,
2405 .read = seq_read,
2406 .llseek = seq_lseek,
2407 .release = seq_release_private,
2410 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2412 static unsigned type2flags[RTN_MAX + 1] = {
2413 [7] = RTF_REJECT, [8] = RTF_REJECT,
2415 unsigned flags = type2flags[type];
2417 if (fi && fi->fib_nh->nh_gw)
2418 flags |= RTF_GATEWAY;
2419 if (mask == htonl(0xFFFFFFFF))
2420 flags |= RTF_HOST;
2421 flags |= RTF_UP;
2422 return flags;
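/* Illustration, not from the original file: the designated indices 7 and 8
 * above are RTN_UNREACHABLE and RTN_PROHIBIT (compare rtn_type_names[]
 * earlier in this file), so an equivalent and arguably clearer spelling of
 * the table would be:
 *
 *	static unsigned type2flags[RTN_MAX + 1] = {
 *		[RTN_UNREACHABLE] = RTF_REJECT,
 *		[RTN_PROHIBIT]    = RTF_REJECT,
 *	};
 */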
2426 * This outputs /proc/net/route.
2427 * The format of the file is not supposed to be changed
2429 * and needs to be the same as the fib_hash output to avoid breaking
2430 * legacy utilities.
2431 static int fib_route_seq_show(struct seq_file *seq, void *v)
2433 const struct fib_trie_iter *iter = seq->private;
2434 struct leaf *l = v;
2435 int i;
2436 char bf[128];
2438 if (v == SEQ_START_TOKEN) {
2439 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2440 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2441 "\tWindow\tIRTT");
2442 return 0;
2445 if (iter->trie == trie_local)
2446 return 0;
2447 if (IS_TNODE(l))
2448 return 0;
2450 for (i=32; i>=0; i--) {
2451 struct leaf_info *li = find_leaf_info(l, i);
2452 struct fib_alias *fa;
2453 __be32 mask, prefix;
2455 if (!li)
2456 continue;
2458 mask = inet_make_mask(li->plen);
2459 prefix = htonl(l->key);
2461 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2462 const struct fib_info *fi = fa->fa_info;
2463 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2465 if (fa->fa_type == RTN_BROADCAST
2466 || fa->fa_type == RTN_MULTICAST)
2467 continue;
2469 if (fi)
2470 snprintf(bf, sizeof(bf),
2471 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2472 fi->fib_dev ? fi->fib_dev->name : "*",
2473 prefix,
2474 fi->fib_nh->nh_gw, flags, 0, 0,
2475 fi->fib_priority,
2476 mask,
2477 (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
2478 fi->fib_window,
2479 fi->fib_rtt >> 3);
2480 else
2481 snprintf(bf, sizeof(bf),
2482 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2483 prefix, 0, flags, 0, 0, 0,
2484 mask, 0, 0, 0);
2486 seq_printf(seq, "%-127s\n", bf);
2490 return 0;
2493 static const struct seq_operations fib_route_seq_ops = {
2494 .start = fib_trie_seq_start,
2495 .next = fib_trie_seq_next,
2496 .stop = fib_trie_seq_stop,
2497 .show = fib_route_seq_show,
2500 static int fib_route_seq_open(struct inode *inode, struct file *file)
2502 struct seq_file *seq;
2503 int rc = -ENOMEM;
2504 struct fib_trie_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
2506 if (!s)
2507 goto out;
2509 rc = seq_open(file, &fib_route_seq_ops);
2510 if (rc)
2511 goto out_kfree;
2513 seq = file->private_data;
2514 seq->private = s;
2515 memset(s, 0, sizeof(*s));
2516 out:
2517 return rc;
2518 out_kfree:
2519 kfree(s);
2520 goto out;
2523 static const struct file_operations fib_route_fops = {
2524 .owner = THIS_MODULE,
2525 .open = fib_route_seq_open,
2526 .read = seq_read,
2527 .llseek = seq_lseek,
2528 .release = seq_release_private,
2531 int __init fib_proc_init(void)
2533 if (!proc_net_fops_create("fib_trie", S_IRUGO, &fib_trie_fops))
2534 goto out1;
2536 if (!proc_net_fops_create("fib_triestat", S_IRUGO, &fib_triestat_fops))
2537 goto out2;
2539 if (!proc_net_fops_create("route", S_IRUGO, &fib_route_fops))
2540 goto out3;
2542 return 0;
2544 out3:
2545 proc_net_remove("fib_triestat");
2546 out2:
2547 proc_net_remove("fib_trie");
2548 out1:
2549 return -ENOMEM;
2552 void __init fib_proc_exit(void)
2554 proc_net_remove("fib_trie");
2555 proc_net_remove("fib_triestat");
2556 proc_net_remove("route");
2559 #endif /* CONFIG_PROC_FS */