2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally descibed in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
26 * Code from fib_hash has been reused which includes the following header:
29 * INET An implementation of the TCP/IP protocol suite for the LINUX
30 * operating system. INET is implemented using the BSD Socket
31 * interface as the means of communication with the user level.
33 * IPv4 FIB: lookup engine and maintenance routines.
36 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
38 * This program is free software; you can redistribute it and/or
39 * modify it under the terms of the GNU General Public License
40 * as published by the Free Software Foundation; either version
41 * 2 of the License, or (at your option) any later version.
43 * Substantial contributions to this work comes from:
45 * David S. Miller, <davem@davemloft.net>
46 * Stephen Hemminger <shemminger@osdl.org>
47 * Paul E. McKenney <paulmck@us.ibm.com>
48 * Patrick McHardy <kaber@trash.net>
51 #define VERSION "0.408"
53 #include <asm/uaccess.h>
54 #include <asm/system.h>
55 #include <linux/bitops.h>
56 #include <linux/types.h>
57 #include <linux/kernel.h>
59 #include <linux/string.h>
60 #include <linux/socket.h>
61 #include <linux/sockios.h>
62 #include <linux/errno.h>
64 #include <linux/inet.h>
65 #include <linux/inetdevice.h>
66 #include <linux/netdevice.h>
67 #include <linux/if_arp.h>
68 #include <linux/proc_fs.h>
69 #include <linux/rcupdate.h>
70 #include <linux/skbuff.h>
71 #include <linux/netlink.h>
72 #include <linux/init.h>
73 #include <linux/list.h>
74 #include <net/net_namespace.h>
76 #include <net/protocol.h>
77 #include <net/route.h>
80 #include <net/ip_fib.h>
81 #include "fib_lookup.h"
#define MAX_STAT_DEPTH 32

/* Keys are 32-bit IPv4 prefixes; KEYLENGTH is the key width in bits. */
#define KEYLENGTH (8*sizeof(t_key))

typedef unsigned int t_key;

/* Node type is encoded in the low bit of ->parent (pointers are aligned). */
#define T_TNODE 0
#define T_LEAF  1
#define NODE_TYPE_MASK	0x1UL
#define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)

#define IS_TNODE(n) (!(n->parent & T_LEAF))
#define IS_LEAF(n) (n->parent & T_LEAF)
103 unsigned long parent
;
105 struct hlist_head list
;
110 struct hlist_node hlist
;
113 struct list_head falh
;
117 unsigned long parent
;
119 unsigned char pos
; /* 2log(KEYLENGTH) bits needed */
120 unsigned char bits
; /* 2log(KEYLENGTH) bits needed */
121 unsigned int full_children
; /* KEYLENGTH bits needed */
122 unsigned int empty_children
; /* KEYLENGTH bits needed */
125 struct work_struct work
;
126 struct tnode
*tnode_free
;
128 struct node
*child
[0];
131 #ifdef CONFIG_IP_FIB_TRIE_STATS
132 struct trie_use_stats
{
134 unsigned int backtrack
;
135 unsigned int semantic_match_passed
;
136 unsigned int semantic_match_miss
;
137 unsigned int null_node_hit
;
138 unsigned int resize_node_skipped
;
143 unsigned int totdepth
;
144 unsigned int maxdepth
;
147 unsigned int nullpointers
;
148 unsigned int prefixes
;
149 unsigned int nodesizes
[MAX_STAT_DEPTH
];
154 #ifdef CONFIG_IP_FIB_TRIE_STATS
155 struct trie_use_stats stats
;
159 static void put_child(struct trie
*t
, struct tnode
*tn
, int i
, struct node
*n
);
160 static void tnode_put_child_reorg(struct tnode
*tn
, int i
, struct node
*n
,
162 static struct node
*resize(struct trie
*t
, struct tnode
*tn
);
163 static struct tnode
*inflate(struct trie
*t
, struct tnode
*tn
);
164 static struct tnode
*halve(struct trie
*t
, struct tnode
*tn
);
165 /* tnodes to free after resize(); protected by RTNL */
166 static struct tnode
*tnode_free_head
;
168 static struct kmem_cache
*fn_alias_kmem __read_mostly
;
169 static struct kmem_cache
*trie_leaf_kmem __read_mostly
;
171 static inline struct tnode
*node_parent(struct node
*node
)
173 return (struct tnode
*)(node
->parent
& ~NODE_TYPE_MASK
);
/* RCU-safe variant of node_parent() for use on the read side. */
static inline struct tnode *node_parent_rcu(struct node *node)
{
	struct tnode *ret = node_parent(node);

	return rcu_dereference(ret);
}
183 /* Same as rcu_assign_pointer
184 * but that macro() assumes that value is a pointer.
186 static inline void node_set_parent(struct node
*node
, struct tnode
*ptr
)
189 node
->parent
= (unsigned long)ptr
| NODE_TYPE(node
);
192 static inline struct node
*tnode_get_child(struct tnode
*tn
, unsigned int i
)
194 BUG_ON(i
>= 1U << tn
->bits
);
/* RCU-safe variant of tnode_get_child() for the lookup path. */
static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
{
	struct node *ret = tnode_get_child(tn, i);

	return rcu_dereference(ret);
}
206 static inline int tnode_child_length(const struct tnode
*tn
)
208 return 1 << tn
->bits
;
211 static inline t_key
mask_pfx(t_key k
, unsigned short l
)
213 return (l
== 0) ? 0 : k
>> (KEYLENGTH
-l
) << (KEYLENGTH
-l
);
216 static inline t_key
tkey_extract_bits(t_key a
, int offset
, int bits
)
218 if (offset
< KEYLENGTH
)
219 return ((t_key
)(a
<< offset
)) >> (KEYLENGTH
- bits
);
224 static inline int tkey_equals(t_key a
, t_key b
)
229 static inline int tkey_sub_equals(t_key a
, int offset
, int bits
, t_key b
)
231 if (bits
== 0 || offset
>= KEYLENGTH
)
233 bits
= bits
> KEYLENGTH
? KEYLENGTH
: bits
;
234 return ((a
^ b
) << offset
) >> (KEYLENGTH
- bits
) == 0;
237 static inline int tkey_mismatch(t_key a
, int offset
, t_key b
)
244 while ((diff
<< i
) >> (KEYLENGTH
-1) == 0)
250 To understand this stuff, an understanding of keys and all their bits is
251 necessary. Every node in the trie has a key associated with it, but not
252 all of the bits in that key are significant.
254 Consider a node 'n' and its parent 'tp'.
256 If n is a leaf, every bit in its key is significant. Its presence is
257 necessitated by path compression, since during a tree traversal (when
258 searching for a leaf - unless we are doing an insertion) we will completely
259 ignore all skipped bits we encounter. Thus we need to verify, at the end of
260 a potentially successful search, that we have indeed been walking the
263 Note that we can never "miss" the correct key in the tree if present by
264 following the wrong path. Path compression ensures that segments of the key
265 that are the same for all keys with a given prefix are skipped, but the
266 skipped part *is* identical for each node in the subtrie below the skipped
267 bit! trie_insert() in this implementation takes care of that - note the
268 call to tkey_sub_equals() in trie_insert().
270 if n is an internal node - a 'tnode' here, the various parts of its key
271 have many different meanings.
274 _________________________________________________________________
275 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
276 -----------------------------------------------------------------
277 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
279 _________________________________________________________________
280 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
281 -----------------------------------------------------------------
282 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
289 First, let's just ignore the bits that come before the parent tp, that is
290 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
291 not use them for anything.
293 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
294 index into the parent's child array. That is, they will be used to find
295 'n' among tp's children.
297 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
300 All the bits we have seen so far are significant to the node n. The rest
301 of the bits are really not needed or indeed known in n->key.
303 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
304 n's child array, and will of course be different for each child.
307 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
312 static inline void check_tnode(const struct tnode
*tn
)
314 WARN_ON(tn
&& tn
->pos
+tn
->bits
> 32);
/* Fill-factor thresholds (percent) that drive halve()/inflate(); the root
 * gets laxer thresholds so it stays larger. */
static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 25;
323 static void __alias_free_mem(struct rcu_head
*head
)
325 struct fib_alias
*fa
= container_of(head
, struct fib_alias
, rcu
);
326 kmem_cache_free(fn_alias_kmem
, fa
);
329 static inline void alias_free_mem_rcu(struct fib_alias
*fa
)
331 call_rcu(&fa
->rcu
, __alias_free_mem
);
334 static void __leaf_free_rcu(struct rcu_head
*head
)
336 struct leaf
*l
= container_of(head
, struct leaf
, rcu
);
337 kmem_cache_free(trie_leaf_kmem
, l
);
340 static inline void free_leaf(struct leaf
*l
)
342 call_rcu_bh(&l
->rcu
, __leaf_free_rcu
);
345 static void __leaf_info_free_rcu(struct rcu_head
*head
)
347 kfree(container_of(head
, struct leaf_info
, rcu
));
350 static inline void free_leaf_info(struct leaf_info
*leaf
)
352 call_rcu(&leaf
->rcu
, __leaf_info_free_rcu
);
355 static struct tnode
*tnode_alloc(size_t size
)
357 if (size
<= PAGE_SIZE
)
358 return kzalloc(size
, GFP_KERNEL
);
360 return __vmalloc(size
, GFP_KERNEL
| __GFP_ZERO
, PAGE_KERNEL
);
363 static void __tnode_vfree(struct work_struct
*arg
)
365 struct tnode
*tn
= container_of(arg
, struct tnode
, work
);
369 static void __tnode_free_rcu(struct rcu_head
*head
)
371 struct tnode
*tn
= container_of(head
, struct tnode
, rcu
);
372 size_t size
= sizeof(struct tnode
) +
373 (sizeof(struct node
*) << tn
->bits
);
375 if (size
<= PAGE_SIZE
)
378 INIT_WORK(&tn
->work
, __tnode_vfree
);
379 schedule_work(&tn
->work
);
383 static inline void tnode_free(struct tnode
*tn
)
386 free_leaf((struct leaf
*) tn
);
388 call_rcu(&tn
->rcu
, __tnode_free_rcu
);
391 static void tnode_free_safe(struct tnode
*tn
)
394 tn
->tnode_free
= tnode_free_head
;
395 tnode_free_head
= tn
;
398 static void tnode_free_flush(void)
402 while ((tn
= tnode_free_head
)) {
403 tnode_free_head
= tn
->tnode_free
;
404 tn
->tnode_free
= NULL
;
409 static struct leaf
*leaf_new(void)
411 struct leaf
*l
= kmem_cache_alloc(trie_leaf_kmem
, GFP_KERNEL
);
414 INIT_HLIST_HEAD(&l
->list
);
419 static struct leaf_info
*leaf_info_new(int plen
)
421 struct leaf_info
*li
= kmalloc(sizeof(struct leaf_info
), GFP_KERNEL
);
424 INIT_LIST_HEAD(&li
->falh
);
429 static struct tnode
*tnode_new(t_key key
, int pos
, int bits
)
431 size_t sz
= sizeof(struct tnode
) + (sizeof(struct node
*) << bits
);
432 struct tnode
*tn
= tnode_alloc(sz
);
435 tn
->parent
= T_TNODE
;
439 tn
->full_children
= 0;
440 tn
->empty_children
= 1<<bits
;
443 pr_debug("AT %p s=%u %lu\n", tn
, (unsigned int) sizeof(struct tnode
),
444 (unsigned long) (sizeof(struct node
) << bits
));
449 * Check whether a tnode 'n' is "full", i.e. it is an internal node
450 * and no bits are skipped. See discussion in dyntree paper p. 6
453 static inline int tnode_full(const struct tnode
*tn
, const struct node
*n
)
455 if (n
== NULL
|| IS_LEAF(n
))
458 return ((struct tnode
*) n
)->pos
== tn
->pos
+ tn
->bits
;
/* Store child n at slot i; -1 tells the reorg helper to recompute the
 * previous fullness itself. */
static inline void put_child(struct trie *t, struct tnode *tn, int i,
			     struct node *n)
{
	tnode_put_child_reorg(tn, i, n, -1);
}
468 * Add a child at position i overwriting the old value.
469 * Update the value of full_children and empty_children.
472 static void tnode_put_child_reorg(struct tnode
*tn
, int i
, struct node
*n
,
475 struct node
*chi
= tn
->child
[i
];
478 BUG_ON(i
>= 1<<tn
->bits
);
480 /* update emptyChildren */
481 if (n
== NULL
&& chi
!= NULL
)
482 tn
->empty_children
++;
483 else if (n
!= NULL
&& chi
== NULL
)
484 tn
->empty_children
--;
486 /* update fullChildren */
488 wasfull
= tnode_full(tn
, chi
);
490 isfull
= tnode_full(tn
, n
);
491 if (wasfull
&& !isfull
)
493 else if (!wasfull
&& isfull
)
497 node_set_parent(n
, tn
);
499 rcu_assign_pointer(tn
->child
[i
], n
);
502 static struct node
*resize(struct trie
*t
, struct tnode
*tn
)
506 struct tnode
*old_tn
;
507 int inflate_threshold_use
;
508 int halve_threshold_use
;
514 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
515 tn
, inflate_threshold
, halve_threshold
);
518 if (tn
->empty_children
== tnode_child_length(tn
)) {
523 if (tn
->empty_children
== tnode_child_length(tn
) - 1)
524 for (i
= 0; i
< tnode_child_length(tn
); i
++) {
531 /* compress one level */
532 node_set_parent(n
, NULL
);
537 * Double as long as the resulting node has a number of
538 * nonempty nodes that are above the threshold.
542 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
543 * the Helsinki University of Technology and Matti Tikkanen of Nokia
544 * Telecommunications, page 6:
545 * "A node is doubled if the ratio of non-empty children to all
546 * children in the *doubled* node is at least 'high'."
548 * 'high' in this instance is the variable 'inflate_threshold'. It
549 * is expressed as a percentage, so we multiply it with
550 * tnode_child_length() and instead of multiplying by 2 (since the
551 * child array will be doubled by inflate()) and multiplying
552 * the left-hand side by 100 (to handle the percentage thing) we
553 * multiply the left-hand side by 50.
555 * The left-hand side may look a bit weird: tnode_child_length(tn)
556 * - tn->empty_children is of course the number of non-null children
557 * in the current node. tn->full_children is the number of "full"
558 * children, that is non-null tnodes with a skip value of 0.
559 * All of those will be doubled in the resulting inflated tnode, so
560 * we just count them one extra time here.
562 * A clearer way to write this would be:
564 * to_be_doubled = tn->full_children;
565 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
568 * new_child_length = tnode_child_length(tn) * 2;
570 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
572 * if (new_fill_factor >= inflate_threshold)
574 * ...and so on, tho it would mess up the while () loop.
577 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
581 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
582 * inflate_threshold * new_child_length
584 * expand not_to_be_doubled and to_be_doubled, and shorten:
585 * 100 * (tnode_child_length(tn) - tn->empty_children +
586 * tn->full_children) >= inflate_threshold * new_child_length
588 * expand new_child_length:
589 * 100 * (tnode_child_length(tn) - tn->empty_children +
590 * tn->full_children) >=
591 * inflate_threshold * tnode_child_length(tn) * 2
594 * 50 * (tn->full_children + tnode_child_length(tn) -
595 * tn->empty_children) >= inflate_threshold *
596 * tnode_child_length(tn)
602 /* Keep root node larger */
605 inflate_threshold_use
= inflate_threshold_root
;
607 inflate_threshold_use
= inflate_threshold
;
611 while ((tn
->full_children
> 0 && max_resize
-- &&
612 50 * (tn
->full_children
+ tnode_child_length(tn
)
613 - tn
->empty_children
)
614 >= inflate_threshold_use
* tnode_child_length(tn
))) {
621 #ifdef CONFIG_IP_FIB_TRIE_STATS
622 t
->stats
.resize_node_skipped
++;
628 if (max_resize
< 0) {
630 pr_warning("Fix inflate_threshold_root."
631 " Now=%d size=%d bits\n",
632 inflate_threshold_root
, tn
->bits
);
634 pr_warning("Fix inflate_threshold."
635 " Now=%d size=%d bits\n",
636 inflate_threshold
, tn
->bits
);
642 * Halve as long as the number of empty children in this
643 * node is above threshold.
647 /* Keep root node larger */
650 halve_threshold_use
= halve_threshold_root
;
652 halve_threshold_use
= halve_threshold
;
656 while (tn
->bits
> 1 && max_resize
-- &&
657 100 * (tnode_child_length(tn
) - tn
->empty_children
) <
658 halve_threshold_use
* tnode_child_length(tn
)) {
664 #ifdef CONFIG_IP_FIB_TRIE_STATS
665 t
->stats
.resize_node_skipped
++;
671 if (max_resize
< 0) {
673 pr_warning("Fix halve_threshold_root."
674 " Now=%d size=%d bits\n",
675 halve_threshold_root
, tn
->bits
);
677 pr_warning("Fix halve_threshold."
678 " Now=%d size=%d bits\n",
679 halve_threshold
, tn
->bits
);
682 /* Only one child remains */
683 if (tn
->empty_children
== tnode_child_length(tn
) - 1)
684 for (i
= 0; i
< tnode_child_length(tn
); i
++) {
691 /* compress one level */
693 node_set_parent(n
, NULL
);
698 return (struct node
*) tn
;
701 static struct tnode
*inflate(struct trie
*t
, struct tnode
*tn
)
703 struct tnode
*oldtnode
= tn
;
704 int olen
= tnode_child_length(tn
);
707 pr_debug("In inflate\n");
709 tn
= tnode_new(oldtnode
->key
, oldtnode
->pos
, oldtnode
->bits
+ 1);
712 return ERR_PTR(-ENOMEM
);
715 * Preallocate and store tnodes before the actual work so we
716 * don't get into an inconsistent state if memory allocation
717 * fails. In case of failure we return the oldnode and inflate
718 * of tnode is ignored.
721 for (i
= 0; i
< olen
; i
++) {
724 inode
= (struct tnode
*) tnode_get_child(oldtnode
, i
);
727 inode
->pos
== oldtnode
->pos
+ oldtnode
->bits
&&
729 struct tnode
*left
, *right
;
730 t_key m
= ~0U << (KEYLENGTH
- 1) >> inode
->pos
;
732 left
= tnode_new(inode
->key
&(~m
), inode
->pos
+ 1,
737 right
= tnode_new(inode
->key
|m
, inode
->pos
+ 1,
745 put_child(t
, tn
, 2*i
, (struct node
*) left
);
746 put_child(t
, tn
, 2*i
+1, (struct node
*) right
);
750 for (i
= 0; i
< olen
; i
++) {
752 struct node
*node
= tnode_get_child(oldtnode
, i
);
753 struct tnode
*left
, *right
;
760 /* A leaf or an internal node with skipped bits */
762 if (IS_LEAF(node
) || ((struct tnode
*) node
)->pos
>
763 tn
->pos
+ tn
->bits
- 1) {
764 if (tkey_extract_bits(node
->key
,
765 oldtnode
->pos
+ oldtnode
->bits
,
767 put_child(t
, tn
, 2*i
, node
);
769 put_child(t
, tn
, 2*i
+1, node
);
773 /* An internal node with two children */
774 inode
= (struct tnode
*) node
;
776 if (inode
->bits
== 1) {
777 put_child(t
, tn
, 2*i
, inode
->child
[0]);
778 put_child(t
, tn
, 2*i
+1, inode
->child
[1]);
780 tnode_free_safe(inode
);
784 /* An internal node with more than two children */
786 /* We will replace this node 'inode' with two new
787 * ones, 'left' and 'right', each with half of the
788 * original children. The two new nodes will have
789 * a position one bit further down the key and this
790 * means that the "significant" part of their keys
791 * (see the discussion near the top of this file)
792 * will differ by one bit, which will be "0" in
793 * left's key and "1" in right's key. Since we are
794 * moving the key position by one step, the bit that
795 * we are moving away from - the bit at position
796 * (inode->pos) - is the one that will differ between
797 * left and right. So... we synthesize that bit in the
799 * The mask 'm' below will be a single "one" bit at
800 * the position (inode->pos)
803 /* Use the old key, but set the new significant
807 left
= (struct tnode
*) tnode_get_child(tn
, 2*i
);
808 put_child(t
, tn
, 2*i
, NULL
);
812 right
= (struct tnode
*) tnode_get_child(tn
, 2*i
+1);
813 put_child(t
, tn
, 2*i
+1, NULL
);
817 size
= tnode_child_length(left
);
818 for (j
= 0; j
< size
; j
++) {
819 put_child(t
, left
, j
, inode
->child
[j
]);
820 put_child(t
, right
, j
, inode
->child
[j
+ size
]);
822 put_child(t
, tn
, 2*i
, resize(t
, left
));
823 put_child(t
, tn
, 2*i
+1, resize(t
, right
));
825 tnode_free_safe(inode
);
827 tnode_free_safe(oldtnode
);
831 int size
= tnode_child_length(tn
);
834 for (j
= 0; j
< size
; j
++)
836 tnode_free((struct tnode
*)tn
->child
[j
]);
840 return ERR_PTR(-ENOMEM
);
844 static struct tnode
*halve(struct trie
*t
, struct tnode
*tn
)
846 struct tnode
*oldtnode
= tn
;
847 struct node
*left
, *right
;
849 int olen
= tnode_child_length(tn
);
851 pr_debug("In halve\n");
853 tn
= tnode_new(oldtnode
->key
, oldtnode
->pos
, oldtnode
->bits
- 1);
856 return ERR_PTR(-ENOMEM
);
859 * Preallocate and store tnodes before the actual work so we
860 * don't get into an inconsistent state if memory allocation
861 * fails. In case of failure we return the oldnode and halve
862 * of tnode is ignored.
865 for (i
= 0; i
< olen
; i
+= 2) {
866 left
= tnode_get_child(oldtnode
, i
);
867 right
= tnode_get_child(oldtnode
, i
+1);
869 /* Two nonempty children */
873 newn
= tnode_new(left
->key
, tn
->pos
+ tn
->bits
, 1);
878 put_child(t
, tn
, i
/2, (struct node
*)newn
);
883 for (i
= 0; i
< olen
; i
+= 2) {
884 struct tnode
*newBinNode
;
886 left
= tnode_get_child(oldtnode
, i
);
887 right
= tnode_get_child(oldtnode
, i
+1);
889 /* At least one of the children is empty */
891 if (right
== NULL
) /* Both are empty */
893 put_child(t
, tn
, i
/2, right
);
898 put_child(t
, tn
, i
/2, left
);
902 /* Two nonempty children */
903 newBinNode
= (struct tnode
*) tnode_get_child(tn
, i
/2);
904 put_child(t
, tn
, i
/2, NULL
);
905 put_child(t
, newBinNode
, 0, left
);
906 put_child(t
, newBinNode
, 1, right
);
907 put_child(t
, tn
, i
/2, resize(t
, newBinNode
));
909 tnode_free_safe(oldtnode
);
913 int size
= tnode_child_length(tn
);
916 for (j
= 0; j
< size
; j
++)
918 tnode_free((struct tnode
*)tn
->child
[j
]);
922 return ERR_PTR(-ENOMEM
);
926 /* readside must use rcu_read_lock currently dump routines
927 via get_fa_head and dump */
929 static struct leaf_info
*find_leaf_info(struct leaf
*l
, int plen
)
931 struct hlist_head
*head
= &l
->list
;
932 struct hlist_node
*node
;
933 struct leaf_info
*li
;
935 hlist_for_each_entry_rcu(li
, node
, head
, hlist
)
936 if (li
->plen
== plen
)
942 static inline struct list_head
*get_fa_head(struct leaf
*l
, int plen
)
944 struct leaf_info
*li
= find_leaf_info(l
, plen
);
952 static void insert_leaf_info(struct hlist_head
*head
, struct leaf_info
*new)
954 struct leaf_info
*li
= NULL
, *last
= NULL
;
955 struct hlist_node
*node
;
957 if (hlist_empty(head
)) {
958 hlist_add_head_rcu(&new->hlist
, head
);
960 hlist_for_each_entry(li
, node
, head
, hlist
) {
961 if (new->plen
> li
->plen
)
967 hlist_add_after_rcu(&last
->hlist
, &new->hlist
);
969 hlist_add_before_rcu(&new->hlist
, &li
->hlist
);
973 /* rcu_read_lock needs to be hold by caller from readside */
976 fib_find_node(struct trie
*t
, u32 key
)
983 n
= rcu_dereference(t
->trie
);
985 while (n
!= NULL
&& NODE_TYPE(n
) == T_TNODE
) {
986 tn
= (struct tnode
*) n
;
990 if (tkey_sub_equals(tn
->key
, pos
, tn
->pos
-pos
, key
)) {
991 pos
= tn
->pos
+ tn
->bits
;
992 n
= tnode_get_child_rcu(tn
,
993 tkey_extract_bits(key
,
999 /* Case we have found a leaf. Compare prefixes */
1001 if (n
!= NULL
&& IS_LEAF(n
) && tkey_equals(key
, n
->key
))
1002 return (struct leaf
*)n
;
1007 static void trie_rebalance(struct trie
*t
, struct tnode
*tn
)
1015 while (tn
!= NULL
&& (tp
= node_parent((struct node
*)tn
)) != NULL
) {
1016 cindex
= tkey_extract_bits(key
, tp
->pos
, tp
->bits
);
1017 wasfull
= tnode_full(tp
, tnode_get_child(tp
, cindex
));
1018 tn
= (struct tnode
*) resize(t
, (struct tnode
*)tn
);
1020 tnode_put_child_reorg((struct tnode
*)tp
, cindex
,
1021 (struct node
*)tn
, wasfull
);
1023 tp
= node_parent((struct node
*) tn
);
1025 rcu_assign_pointer(t
->trie
, (struct node
*)tn
);
1033 /* Handle last (top) tnode */
1035 tn
= (struct tnode
*)resize(t
, (struct tnode
*)tn
);
1037 rcu_assign_pointer(t
->trie
, (struct node
*)tn
);
1043 /* only used from updater-side */
1045 static struct list_head
*fib_insert_node(struct trie
*t
, u32 key
, int plen
)
1048 struct tnode
*tp
= NULL
, *tn
= NULL
;
1052 struct list_head
*fa_head
= NULL
;
1053 struct leaf_info
*li
;
1059 /* If we point to NULL, stop. Either the tree is empty and we should
1060 * just put a new leaf in if, or we have reached an empty child slot,
1061 * and we should just put our new leaf in that.
1062 * If we point to a T_TNODE, check if it matches our key. Note that
1063 * a T_TNODE might be skipping any number of bits - its 'pos' need
1064 * not be the parent's 'pos'+'bits'!
1066 * If it does match the current key, get pos/bits from it, extract
1067 * the index from our key, push the T_TNODE and walk the tree.
1069 * If it doesn't, we have to replace it with a new T_TNODE.
1071 * If we point to a T_LEAF, it might or might not have the same key
1072 * as we do. If it does, just change the value, update the T_LEAF's
1073 * value, and return it.
1074 * If it doesn't, we need to replace it with a T_TNODE.
1077 while (n
!= NULL
&& NODE_TYPE(n
) == T_TNODE
) {
1078 tn
= (struct tnode
*) n
;
1082 if (tkey_sub_equals(tn
->key
, pos
, tn
->pos
-pos
, key
)) {
1084 pos
= tn
->pos
+ tn
->bits
;
1085 n
= tnode_get_child(tn
,
1086 tkey_extract_bits(key
,
1090 BUG_ON(n
&& node_parent(n
) != tn
);
1096 * n ----> NULL, LEAF or TNODE
1098 * tp is n's (parent) ----> NULL or TNODE
1101 BUG_ON(tp
&& IS_LEAF(tp
));
1103 /* Case 1: n is a leaf. Compare prefixes */
1105 if (n
!= NULL
&& IS_LEAF(n
) && tkey_equals(key
, n
->key
)) {
1106 l
= (struct leaf
*) n
;
1107 li
= leaf_info_new(plen
);
1112 fa_head
= &li
->falh
;
1113 insert_leaf_info(&l
->list
, li
);
1122 li
= leaf_info_new(plen
);
1129 fa_head
= &li
->falh
;
1130 insert_leaf_info(&l
->list
, li
);
1132 if (t
->trie
&& n
== NULL
) {
1133 /* Case 2: n is NULL, and will just insert a new leaf */
1135 node_set_parent((struct node
*)l
, tp
);
1137 cindex
= tkey_extract_bits(key
, tp
->pos
, tp
->bits
);
1138 put_child(t
, (struct tnode
*)tp
, cindex
, (struct node
*)l
);
1140 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1142 * Add a new tnode here
1143 * first tnode need some special handling
1147 pos
= tp
->pos
+tp
->bits
;
1152 newpos
= tkey_mismatch(key
, pos
, n
->key
);
1153 tn
= tnode_new(n
->key
, newpos
, 1);
1156 tn
= tnode_new(key
, newpos
, 1); /* First tnode */
1165 node_set_parent((struct node
*)tn
, tp
);
1167 missbit
= tkey_extract_bits(key
, newpos
, 1);
1168 put_child(t
, tn
, missbit
, (struct node
*)l
);
1169 put_child(t
, tn
, 1-missbit
, n
);
1172 cindex
= tkey_extract_bits(key
, tp
->pos
, tp
->bits
);
1173 put_child(t
, (struct tnode
*)tp
, cindex
,
1176 rcu_assign_pointer(t
->trie
, (struct node
*)tn
);
1181 if (tp
&& tp
->pos
+ tp
->bits
> 32)
1182 pr_warning("fib_trie"
1183 " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1184 tp
, tp
->pos
, tp
->bits
, key
, plen
);
1186 /* Rebalance the trie */
1188 trie_rebalance(t
, tp
);
1194 * Caller must hold RTNL.
1196 static int fn_trie_insert(struct fib_table
*tb
, struct fib_config
*cfg
)
1198 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1199 struct fib_alias
*fa
, *new_fa
;
1200 struct list_head
*fa_head
= NULL
;
1201 struct fib_info
*fi
;
1202 int plen
= cfg
->fc_dst_len
;
1203 u8 tos
= cfg
->fc_tos
;
1211 key
= ntohl(cfg
->fc_dst
);
1213 pr_debug("Insert table=%u %08x/%d\n", tb
->tb_id
, key
, plen
);
1215 mask
= ntohl(inet_make_mask(plen
));
1222 fi
= fib_create_info(cfg
);
1228 l
= fib_find_node(t
, key
);
1232 fa_head
= get_fa_head(l
, plen
);
1233 fa
= fib_find_alias(fa_head
, tos
, fi
->fib_priority
);
1236 /* Now fa, if non-NULL, points to the first fib alias
1237 * with the same keys [prefix,tos,priority], if such key already
1238 * exists or to the node before which we will insert new one.
1240 * If fa is NULL, we will need to allocate a new one and
1241 * insert to the head of f.
1243 * If f is NULL, no fib node matched the destination key
1244 * and we need to allocate a new one of those as well.
1247 if (fa
&& fa
->fa_tos
== tos
&&
1248 fa
->fa_info
->fib_priority
== fi
->fib_priority
) {
1249 struct fib_alias
*fa_first
, *fa_match
;
1252 if (cfg
->fc_nlflags
& NLM_F_EXCL
)
1256 * 1. Find exact match for type, scope, fib_info to avoid
1258 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1262 fa
= list_entry(fa
->fa_list
.prev
, struct fib_alias
, fa_list
);
1263 list_for_each_entry_continue(fa
, fa_head
, fa_list
) {
1264 if (fa
->fa_tos
!= tos
)
1266 if (fa
->fa_info
->fib_priority
!= fi
->fib_priority
)
1268 if (fa
->fa_type
== cfg
->fc_type
&&
1269 fa
->fa_scope
== cfg
->fc_scope
&&
1270 fa
->fa_info
== fi
) {
1276 if (cfg
->fc_nlflags
& NLM_F_REPLACE
) {
1277 struct fib_info
*fi_drop
;
1287 new_fa
= kmem_cache_alloc(fn_alias_kmem
, GFP_KERNEL
);
1291 fi_drop
= fa
->fa_info
;
1292 new_fa
->fa_tos
= fa
->fa_tos
;
1293 new_fa
->fa_info
= fi
;
1294 new_fa
->fa_type
= cfg
->fc_type
;
1295 new_fa
->fa_scope
= cfg
->fc_scope
;
1296 state
= fa
->fa_state
;
1297 new_fa
->fa_state
= state
& ~FA_S_ACCESSED
;
1299 list_replace_rcu(&fa
->fa_list
, &new_fa
->fa_list
);
1300 alias_free_mem_rcu(fa
);
1302 fib_release_info(fi_drop
);
1303 if (state
& FA_S_ACCESSED
)
1304 rt_cache_flush(cfg
->fc_nlinfo
.nl_net
, -1);
1305 rtmsg_fib(RTM_NEWROUTE
, htonl(key
), new_fa
, plen
,
1306 tb
->tb_id
, &cfg
->fc_nlinfo
, NLM_F_REPLACE
);
1310 /* Error if we find a perfect match which
1311 * uses the same scope, type, and nexthop
1317 if (!(cfg
->fc_nlflags
& NLM_F_APPEND
))
1321 if (!(cfg
->fc_nlflags
& NLM_F_CREATE
))
1325 new_fa
= kmem_cache_alloc(fn_alias_kmem
, GFP_KERNEL
);
1329 new_fa
->fa_info
= fi
;
1330 new_fa
->fa_tos
= tos
;
1331 new_fa
->fa_type
= cfg
->fc_type
;
1332 new_fa
->fa_scope
= cfg
->fc_scope
;
1333 new_fa
->fa_state
= 0;
1335 * Insert new entry to the list.
1339 fa_head
= fib_insert_node(t
, key
, plen
);
1340 if (unlikely(!fa_head
)) {
1342 goto out_free_new_fa
;
1346 list_add_tail_rcu(&new_fa
->fa_list
,
1347 (fa
? &fa
->fa_list
: fa_head
));
1349 rt_cache_flush(cfg
->fc_nlinfo
.nl_net
, -1);
1350 rtmsg_fib(RTM_NEWROUTE
, htonl(key
), new_fa
, plen
, tb
->tb_id
,
1351 &cfg
->fc_nlinfo
, 0);
1356 kmem_cache_free(fn_alias_kmem
, new_fa
);
1358 fib_release_info(fi
);
1363 /* should be called with rcu_read_lock */
1364 static int check_leaf(struct trie
*t
, struct leaf
*l
,
1365 t_key key
, const struct flowi
*flp
,
1366 struct fib_result
*res
)
1368 struct leaf_info
*li
;
1369 struct hlist_head
*hhead
= &l
->list
;
1370 struct hlist_node
*node
;
1372 hlist_for_each_entry_rcu(li
, node
, hhead
, hlist
) {
1374 int plen
= li
->plen
;
1375 __be32 mask
= inet_make_mask(plen
);
1377 if (l
->key
!= (key
& ntohl(mask
)))
1380 err
= fib_semantic_match(&li
->falh
, flp
, res
, plen
);
1382 #ifdef CONFIG_IP_FIB_TRIE_STATS
1384 t
->stats
.semantic_match_passed
++;
1386 t
->stats
.semantic_match_miss
++;
1395 static int fn_trie_lookup(struct fib_table
*tb
, const struct flowi
*flp
,
1396 struct fib_result
*res
)
1398 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1403 t_key key
= ntohl(flp
->fl4_dst
);
1406 int current_prefix_length
= KEYLENGTH
;
1408 t_key node_prefix
, key_prefix
, pref_mismatch
;
1413 n
= rcu_dereference(t
->trie
);
1417 #ifdef CONFIG_IP_FIB_TRIE_STATS
1423 ret
= check_leaf(t
, (struct leaf
*)n
, key
, flp
, res
);
1427 pn
= (struct tnode
*) n
;
1435 cindex
= tkey_extract_bits(mask_pfx(key
, current_prefix_length
),
1438 n
= tnode_get_child(pn
, cindex
);
1441 #ifdef CONFIG_IP_FIB_TRIE_STATS
1442 t
->stats
.null_node_hit
++;
1448 ret
= check_leaf(t
, (struct leaf
*)n
, key
, flp
, res
);
1454 cn
= (struct tnode
*)n
;
1457 * It's a tnode, and we can do some extra checks here if we
1458 * like, to avoid descending into a dead-end branch.
1459 * This tnode is in the parent's child array at index
1460 * key[p_pos..p_pos+p_bits] but potentially with some bits
1461 * chopped off, so in reality the index may be just a
1462 * subprefix, padded with zero at the end.
1463 * We can also take a look at any skipped bits in this
1464 * tnode - everything up to p_pos is supposed to be ok,
1465 * and the non-chopped bits of the index (se previous
1466 * paragraph) are also guaranteed ok, but the rest is
1467 * considered unknown.
1469 * The skipped bits are key[pos+bits..cn->pos].
1472 /* If current_prefix_length < pos+bits, we are already doing
1473 * actual prefix matching, which means everything from
1474 * pos+(bits-chopped_off) onward must be zero along some
1475 * branch of this subtree - otherwise there is *no* valid
1476 * prefix present. Here we can only check the skipped
1477 * bits. Remember, since we have already indexed into the
1478 * parent's child array, we know that the bits we chopped of
1482 /* NOTA BENE: Checking only skipped bits
1483 for the new node here */
1485 if (current_prefix_length
< pos
+bits
) {
1486 if (tkey_extract_bits(cn
->key
, current_prefix_length
,
1487 cn
->pos
- current_prefix_length
)
1493 * If chopped_off=0, the index is fully validated and we
1494 * only need to look at the skipped bits for this, the new,
1495 * tnode. What we actually want to do is to find out if
1496 * these skipped bits match our key perfectly, or if we will
1497 * have to count on finding a matching prefix further down,
1498 * because if we do, we would like to have some way of
1499 * verifying the existence of such a prefix at this point.
1502 /* The only thing we can do at this point is to verify that
1503 * any such matching prefix can indeed be a prefix to our
1504 * key, and if the bits in the node we are inspecting that
1505 * do not match our key are not ZERO, this cannot be true.
1506 * Thus, find out where there is a mismatch (before cn->pos)
1507 * and verify that all the mismatching bits are zero in the
1512 * Note: We aren't very concerned about the piece of
1513 * the key that precede pn->pos+pn->bits, since these
1514 * have already been checked. The bits after cn->pos
1515 * aren't checked since these are by definition
1516 * "unknown" at this point. Thus, what we want to see
1517 * is if we are about to enter the "prefix matching"
1518 * state, and in that case verify that the skipped
1519 * bits that will prevail throughout this subtree are
1520 * zero, as they have to be if we are to find a
1524 node_prefix
= mask_pfx(cn
->key
, cn
->pos
);
1525 key_prefix
= mask_pfx(key
, cn
->pos
);
1526 pref_mismatch
= key_prefix
^node_prefix
;
1530 * In short: If skipped bits in this node do not match
1531 * the search key, enter the "prefix matching"
1534 if (pref_mismatch
) {
1535 while (!(pref_mismatch
& (1<<(KEYLENGTH
-1)))) {
1537 pref_mismatch
= pref_mismatch
<< 1;
1539 key_prefix
= tkey_extract_bits(cn
->key
, mp
, cn
->pos
-mp
);
1541 if (key_prefix
!= 0)
1544 if (current_prefix_length
>= cn
->pos
)
1545 current_prefix_length
= mp
;
1548 pn
= (struct tnode
*)n
; /* Descend */
1555 /* As zero don't change the child key (cindex) */
1556 while ((chopped_off
<= pn
->bits
)
1557 && !(cindex
& (1<<(chopped_off
-1))))
1560 /* Decrease current_... with bits chopped off */
1561 if (current_prefix_length
> pn
->pos
+ pn
->bits
- chopped_off
)
1562 current_prefix_length
= pn
->pos
+ pn
->bits
1566 * Either we do the actual chop off according or if we have
1567 * chopped off all bits in this tnode walk up to our parent.
1570 if (chopped_off
<= pn
->bits
) {
1571 cindex
&= ~(1 << (chopped_off
-1));
1573 struct tnode
*parent
= node_parent((struct node
*) pn
);
1577 /* Get Child's index */
1578 cindex
= tkey_extract_bits(pn
->key
, parent
->pos
, parent
->bits
);
1582 #ifdef CONFIG_IP_FIB_TRIE_STATS
1583 t
->stats
.backtrack
++;
1596 * Remove the leaf and return parent.
1598 static void trie_leaf_remove(struct trie
*t
, struct leaf
*l
)
1600 struct tnode
*tp
= node_parent((struct node
*) l
);
1602 pr_debug("entering trie_leaf_remove(%p)\n", l
);
1605 t_key cindex
= tkey_extract_bits(l
->key
, tp
->pos
, tp
->bits
);
1606 put_child(t
, (struct tnode
*)tp
, cindex
, NULL
);
1607 trie_rebalance(t
, tp
);
1609 rcu_assign_pointer(t
->trie
, NULL
);
1615 * Caller must hold RTNL.
1617 static int fn_trie_delete(struct fib_table
*tb
, struct fib_config
*cfg
)
1619 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1621 int plen
= cfg
->fc_dst_len
;
1622 u8 tos
= cfg
->fc_tos
;
1623 struct fib_alias
*fa
, *fa_to_delete
;
1624 struct list_head
*fa_head
;
1626 struct leaf_info
*li
;
1631 key
= ntohl(cfg
->fc_dst
);
1632 mask
= ntohl(inet_make_mask(plen
));
1638 l
= fib_find_node(t
, key
);
1643 fa_head
= get_fa_head(l
, plen
);
1644 fa
= fib_find_alias(fa_head
, tos
, 0);
1649 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key
, plen
, tos
, t
);
1651 fa_to_delete
= NULL
;
1652 fa
= list_entry(fa
->fa_list
.prev
, struct fib_alias
, fa_list
);
1653 list_for_each_entry_continue(fa
, fa_head
, fa_list
) {
1654 struct fib_info
*fi
= fa
->fa_info
;
1656 if (fa
->fa_tos
!= tos
)
1659 if ((!cfg
->fc_type
|| fa
->fa_type
== cfg
->fc_type
) &&
1660 (cfg
->fc_scope
== RT_SCOPE_NOWHERE
||
1661 fa
->fa_scope
== cfg
->fc_scope
) &&
1662 (!cfg
->fc_protocol
||
1663 fi
->fib_protocol
== cfg
->fc_protocol
) &&
1664 fib_nh_match(cfg
, fi
) == 0) {
1674 rtmsg_fib(RTM_DELROUTE
, htonl(key
), fa
, plen
, tb
->tb_id
,
1675 &cfg
->fc_nlinfo
, 0);
1677 l
= fib_find_node(t
, key
);
1678 li
= find_leaf_info(l
, plen
);
1680 list_del_rcu(&fa
->fa_list
);
1682 if (list_empty(fa_head
)) {
1683 hlist_del_rcu(&li
->hlist
);
1687 if (hlist_empty(&l
->list
))
1688 trie_leaf_remove(t
, l
);
1690 if (fa
->fa_state
& FA_S_ACCESSED
)
1691 rt_cache_flush(cfg
->fc_nlinfo
.nl_net
, -1);
1693 fib_release_info(fa
->fa_info
);
1694 alias_free_mem_rcu(fa
);
1698 static int trie_flush_list(struct list_head
*head
)
1700 struct fib_alias
*fa
, *fa_node
;
1703 list_for_each_entry_safe(fa
, fa_node
, head
, fa_list
) {
1704 struct fib_info
*fi
= fa
->fa_info
;
1706 if (fi
&& (fi
->fib_flags
& RTNH_F_DEAD
)) {
1707 list_del_rcu(&fa
->fa_list
);
1708 fib_release_info(fa
->fa_info
);
1709 alias_free_mem_rcu(fa
);
1716 static int trie_flush_leaf(struct leaf
*l
)
1719 struct hlist_head
*lih
= &l
->list
;
1720 struct hlist_node
*node
, *tmp
;
1721 struct leaf_info
*li
= NULL
;
1723 hlist_for_each_entry_safe(li
, node
, tmp
, lih
, hlist
) {
1724 found
+= trie_flush_list(&li
->falh
);
1726 if (list_empty(&li
->falh
)) {
1727 hlist_del_rcu(&li
->hlist
);
1735 * Scan for the next right leaf starting at node p->child[idx]
1736 * Since we have back pointer, no recursion necessary.
1738 static struct leaf
*leaf_walk_rcu(struct tnode
*p
, struct node
*c
)
1744 idx
= tkey_extract_bits(c
->key
, p
->pos
, p
->bits
) + 1;
1748 while (idx
< 1u << p
->bits
) {
1749 c
= tnode_get_child_rcu(p
, idx
++);
1754 prefetch(p
->child
[idx
]);
1755 return (struct leaf
*) c
;
1758 /* Rescan start scanning in new node */
1759 p
= (struct tnode
*) c
;
1763 /* Node empty, walk back up to parent */
1764 c
= (struct node
*) p
;
1765 } while ( (p
= node_parent_rcu(c
)) != NULL
);
1767 return NULL
; /* Root of trie */
1770 static struct leaf
*trie_firstleaf(struct trie
*t
)
1772 struct tnode
*n
= (struct tnode
*) rcu_dereference(t
->trie
);
1777 if (IS_LEAF(n
)) /* trie is just a leaf */
1778 return (struct leaf
*) n
;
1780 return leaf_walk_rcu(n
, NULL
);
1783 static struct leaf
*trie_nextleaf(struct leaf
*l
)
1785 struct node
*c
= (struct node
*) l
;
1786 struct tnode
*p
= node_parent(c
);
1789 return NULL
; /* trie with just one leaf */
1791 return leaf_walk_rcu(p
, c
);
/* Return the @index'th leaf (0-based) of @t by linear walk, or NULL. */
static struct leaf *trie_leafindex(struct trie *t, int index)
{
	struct leaf *l = trie_firstleaf(t);

	while (l && index-- > 0)
		l = trie_nextleaf(l);

	return l;
}
1806 * Caller must hold RTNL.
1808 static int fn_trie_flush(struct fib_table
*tb
)
1810 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1811 struct leaf
*l
, *ll
= NULL
;
1814 for (l
= trie_firstleaf(t
); l
; l
= trie_nextleaf(l
)) {
1815 found
+= trie_flush_leaf(l
);
1817 if (ll
&& hlist_empty(&ll
->list
))
1818 trie_leaf_remove(t
, ll
);
1822 if (ll
&& hlist_empty(&ll
->list
))
1823 trie_leaf_remove(t
, ll
);
1825 pr_debug("trie_flush found=%d\n", found
);
1829 static void fn_trie_select_default(struct fib_table
*tb
,
1830 const struct flowi
*flp
,
1831 struct fib_result
*res
)
1833 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1834 int order
, last_idx
;
1835 struct fib_info
*fi
= NULL
;
1836 struct fib_info
*last_resort
;
1837 struct fib_alias
*fa
= NULL
;
1838 struct list_head
*fa_head
;
1847 l
= fib_find_node(t
, 0);
1851 fa_head
= get_fa_head(l
, 0);
1855 if (list_empty(fa_head
))
1858 list_for_each_entry_rcu(fa
, fa_head
, fa_list
) {
1859 struct fib_info
*next_fi
= fa
->fa_info
;
1861 if (fa
->fa_scope
!= res
->scope
||
1862 fa
->fa_type
!= RTN_UNICAST
)
1865 if (next_fi
->fib_priority
> res
->fi
->fib_priority
)
1867 if (!next_fi
->fib_nh
[0].nh_gw
||
1868 next_fi
->fib_nh
[0].nh_scope
!= RT_SCOPE_LINK
)
1870 fa
->fa_state
|= FA_S_ACCESSED
;
1873 if (next_fi
!= res
->fi
)
1875 } else if (!fib_detect_death(fi
, order
, &last_resort
,
1876 &last_idx
, tb
->tb_default
)) {
1877 fib_result_assign(res
, fi
);
1878 tb
->tb_default
= order
;
1884 if (order
<= 0 || fi
== NULL
) {
1885 tb
->tb_default
= -1;
1889 if (!fib_detect_death(fi
, order
, &last_resort
, &last_idx
,
1891 fib_result_assign(res
, fi
);
1892 tb
->tb_default
= order
;
1896 fib_result_assign(res
, last_resort
);
1897 tb
->tb_default
= last_idx
;
1902 static int fn_trie_dump_fa(t_key key
, int plen
, struct list_head
*fah
,
1903 struct fib_table
*tb
,
1904 struct sk_buff
*skb
, struct netlink_callback
*cb
)
1907 struct fib_alias
*fa
;
1908 __be32 xkey
= htonl(key
);
1913 /* rcu_read_lock is hold by caller */
1915 list_for_each_entry_rcu(fa
, fah
, fa_list
) {
1921 if (fib_dump_info(skb
, NETLINK_CB(cb
->skb
).pid
,
1930 fa
->fa_info
, NLM_F_MULTI
) < 0) {
1940 static int fn_trie_dump_leaf(struct leaf
*l
, struct fib_table
*tb
,
1941 struct sk_buff
*skb
, struct netlink_callback
*cb
)
1943 struct leaf_info
*li
;
1944 struct hlist_node
*node
;
1950 /* rcu_read_lock is hold by caller */
1951 hlist_for_each_entry_rcu(li
, node
, &l
->list
, hlist
) {
1960 if (list_empty(&li
->falh
))
1963 if (fn_trie_dump_fa(l
->key
, li
->plen
, &li
->falh
, tb
, skb
, cb
) < 0) {
1974 static int fn_trie_dump(struct fib_table
*tb
, struct sk_buff
*skb
,
1975 struct netlink_callback
*cb
)
1978 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1979 t_key key
= cb
->args
[2];
1980 int count
= cb
->args
[3];
1983 /* Dump starting at last key.
1984 * Note: 0.0.0.0/0 (ie default) is first key.
1987 l
= trie_firstleaf(t
);
1989 /* Normally, continue from last key, but if that is missing
1990 * fallback to using slow rescan
1992 l
= fib_find_node(t
, key
);
1994 l
= trie_leafindex(t
, count
);
1998 cb
->args
[2] = l
->key
;
1999 if (fn_trie_dump_leaf(l
, tb
, skb
, cb
) < 0) {
2000 cb
->args
[3] = count
;
2006 l
= trie_nextleaf(l
);
2007 memset(&cb
->args
[4], 0,
2008 sizeof(cb
->args
) - 4*sizeof(cb
->args
[0]));
2010 cb
->args
[3] = count
;
2016 void __init
fib_hash_init(void)
2018 fn_alias_kmem
= kmem_cache_create("ip_fib_alias",
2019 sizeof(struct fib_alias
),
2020 0, SLAB_PANIC
, NULL
);
2022 trie_leaf_kmem
= kmem_cache_create("ip_fib_trie",
2023 max(sizeof(struct leaf
),
2024 sizeof(struct leaf_info
)),
2025 0, SLAB_PANIC
, NULL
);
2029 /* Fix more generic FIB names for init later */
2030 struct fib_table
*fib_hash_table(u32 id
)
2032 struct fib_table
*tb
;
2035 tb
= kmalloc(sizeof(struct fib_table
) + sizeof(struct trie
),
2041 tb
->tb_default
= -1;
2042 tb
->tb_lookup
= fn_trie_lookup
;
2043 tb
->tb_insert
= fn_trie_insert
;
2044 tb
->tb_delete
= fn_trie_delete
;
2045 tb
->tb_flush
= fn_trie_flush
;
2046 tb
->tb_select_default
= fn_trie_select_default
;
2047 tb
->tb_dump
= fn_trie_dump
;
2049 t
= (struct trie
*) tb
->tb_data
;
2050 memset(t
, 0, sizeof(*t
));
2052 if (id
== RT_TABLE_LOCAL
)
2053 pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION
);
2058 #ifdef CONFIG_PROC_FS
2059 /* Depth first Trie walk iterator */
2060 struct fib_trie_iter
{
2061 struct seq_net_private p
;
2062 struct fib_table
*tb
;
2063 struct tnode
*tnode
;
2068 static struct node
*fib_trie_get_next(struct fib_trie_iter
*iter
)
2070 struct tnode
*tn
= iter
->tnode
;
2071 unsigned cindex
= iter
->index
;
2074 /* A single entry routing table */
2078 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2079 iter
->tnode
, iter
->index
, iter
->depth
);
2081 while (cindex
< (1<<tn
->bits
)) {
2082 struct node
*n
= tnode_get_child_rcu(tn
, cindex
);
2087 iter
->index
= cindex
+ 1;
2089 /* push down one level */
2090 iter
->tnode
= (struct tnode
*) n
;
2100 /* Current node exhausted, pop back up */
2101 p
= node_parent_rcu((struct node
*)tn
);
2103 cindex
= tkey_extract_bits(tn
->key
, p
->pos
, p
->bits
)+1;
2113 static struct node
*fib_trie_get_first(struct fib_trie_iter
*iter
,
2121 n
= rcu_dereference(t
->trie
);
2126 iter
->tnode
= (struct tnode
*) n
;
2138 static void trie_collect_stats(struct trie
*t
, struct trie_stat
*s
)
2141 struct fib_trie_iter iter
;
2143 memset(s
, 0, sizeof(*s
));
2146 for (n
= fib_trie_get_first(&iter
, t
); n
; n
= fib_trie_get_next(&iter
)) {
2148 struct leaf
*l
= (struct leaf
*)n
;
2149 struct leaf_info
*li
;
2150 struct hlist_node
*tmp
;
2153 s
->totdepth
+= iter
.depth
;
2154 if (iter
.depth
> s
->maxdepth
)
2155 s
->maxdepth
= iter
.depth
;
2157 hlist_for_each_entry_rcu(li
, tmp
, &l
->list
, hlist
)
2160 const struct tnode
*tn
= (const struct tnode
*) n
;
2164 if (tn
->bits
< MAX_STAT_DEPTH
)
2165 s
->nodesizes
[tn
->bits
]++;
2167 for (i
= 0; i
< (1<<tn
->bits
); i
++)
2176 * This outputs /proc/net/fib_triestats
2178 static void trie_show_stats(struct seq_file
*seq
, struct trie_stat
*stat
)
2180 unsigned i
, max
, pointers
, bytes
, avdepth
;
2183 avdepth
= stat
->totdepth
*100 / stat
->leaves
;
2187 seq_printf(seq
, "\tAver depth: %u.%02d\n",
2188 avdepth
/ 100, avdepth
% 100);
2189 seq_printf(seq
, "\tMax depth: %u\n", stat
->maxdepth
);
2191 seq_printf(seq
, "\tLeaves: %u\n", stat
->leaves
);
2192 bytes
= sizeof(struct leaf
) * stat
->leaves
;
2194 seq_printf(seq
, "\tPrefixes: %u\n", stat
->prefixes
);
2195 bytes
+= sizeof(struct leaf_info
) * stat
->prefixes
;
2197 seq_printf(seq
, "\tInternal nodes: %u\n\t", stat
->tnodes
);
2198 bytes
+= sizeof(struct tnode
) * stat
->tnodes
;
2200 max
= MAX_STAT_DEPTH
;
2201 while (max
> 0 && stat
->nodesizes
[max
-1] == 0)
2205 for (i
= 1; i
<= max
; i
++)
2206 if (stat
->nodesizes
[i
] != 0) {
2207 seq_printf(seq
, " %u: %u", i
, stat
->nodesizes
[i
]);
2208 pointers
+= (1<<i
) * stat
->nodesizes
[i
];
2210 seq_putc(seq
, '\n');
2211 seq_printf(seq
, "\tPointers: %u\n", pointers
);
2213 bytes
+= sizeof(struct node
*) * pointers
;
2214 seq_printf(seq
, "Null ptrs: %u\n", stat
->nullpointers
);
2215 seq_printf(seq
, "Total size: %u kB\n", (bytes
+ 1023) / 1024);
2218 #ifdef CONFIG_IP_FIB_TRIE_STATS
2219 static void trie_show_usage(struct seq_file
*seq
,
2220 const struct trie_use_stats
*stats
)
2222 seq_printf(seq
, "\nCounters:\n---------\n");
2223 seq_printf(seq
, "gets = %u\n", stats
->gets
);
2224 seq_printf(seq
, "backtracks = %u\n", stats
->backtrack
);
2225 seq_printf(seq
, "semantic match passed = %u\n",
2226 stats
->semantic_match_passed
);
2227 seq_printf(seq
, "semantic match miss = %u\n",
2228 stats
->semantic_match_miss
);
2229 seq_printf(seq
, "null node hit= %u\n", stats
->null_node_hit
);
2230 seq_printf(seq
, "skipped node resize = %u\n\n",
2231 stats
->resize_node_skipped
);
2233 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2235 static void fib_table_print(struct seq_file
*seq
, struct fib_table
*tb
)
2237 if (tb
->tb_id
== RT_TABLE_LOCAL
)
2238 seq_puts(seq
, "Local:\n");
2239 else if (tb
->tb_id
== RT_TABLE_MAIN
)
2240 seq_puts(seq
, "Main:\n");
2242 seq_printf(seq
, "Id %d:\n", tb
->tb_id
);
2246 static int fib_triestat_seq_show(struct seq_file
*seq
, void *v
)
2248 struct net
*net
= (struct net
*)seq
->private;
2252 "Basic info: size of leaf:"
2253 " %Zd bytes, size of tnode: %Zd bytes.\n",
2254 sizeof(struct leaf
), sizeof(struct tnode
));
2256 for (h
= 0; h
< FIB_TABLE_HASHSZ
; h
++) {
2257 struct hlist_head
*head
= &net
->ipv4
.fib_table_hash
[h
];
2258 struct hlist_node
*node
;
2259 struct fib_table
*tb
;
2261 hlist_for_each_entry_rcu(tb
, node
, head
, tb_hlist
) {
2262 struct trie
*t
= (struct trie
*) tb
->tb_data
;
2263 struct trie_stat stat
;
2268 fib_table_print(seq
, tb
);
2270 trie_collect_stats(t
, &stat
);
2271 trie_show_stats(seq
, &stat
);
2272 #ifdef CONFIG_IP_FIB_TRIE_STATS
2273 trie_show_usage(seq
, &t
->stats
);
2281 static int fib_triestat_seq_open(struct inode
*inode
, struct file
*file
)
2283 return single_open_net(inode
, file
, fib_triestat_seq_show
);
2286 static const struct file_operations fib_triestat_fops
= {
2287 .owner
= THIS_MODULE
,
2288 .open
= fib_triestat_seq_open
,
2290 .llseek
= seq_lseek
,
2291 .release
= single_release_net
,
2294 static struct node
*fib_trie_get_idx(struct seq_file
*seq
, loff_t pos
)
2296 struct fib_trie_iter
*iter
= seq
->private;
2297 struct net
*net
= seq_file_net(seq
);
2301 for (h
= 0; h
< FIB_TABLE_HASHSZ
; h
++) {
2302 struct hlist_head
*head
= &net
->ipv4
.fib_table_hash
[h
];
2303 struct hlist_node
*node
;
2304 struct fib_table
*tb
;
2306 hlist_for_each_entry_rcu(tb
, node
, head
, tb_hlist
) {
2309 for (n
= fib_trie_get_first(iter
,
2310 (struct trie
*) tb
->tb_data
);
2311 n
; n
= fib_trie_get_next(iter
))
2322 static void *fib_trie_seq_start(struct seq_file
*seq
, loff_t
*pos
)
2326 return fib_trie_get_idx(seq
, *pos
);
2329 static void *fib_trie_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2331 struct fib_trie_iter
*iter
= seq
->private;
2332 struct net
*net
= seq_file_net(seq
);
2333 struct fib_table
*tb
= iter
->tb
;
2334 struct hlist_node
*tb_node
;
2339 /* next node in same table */
2340 n
= fib_trie_get_next(iter
);
2344 /* walk rest of this hash chain */
2345 h
= tb
->tb_id
& (FIB_TABLE_HASHSZ
- 1);
2346 while ( (tb_node
= rcu_dereference(tb
->tb_hlist
.next
)) ) {
2347 tb
= hlist_entry(tb_node
, struct fib_table
, tb_hlist
);
2348 n
= fib_trie_get_first(iter
, (struct trie
*) tb
->tb_data
);
2353 /* new hash chain */
2354 while (++h
< FIB_TABLE_HASHSZ
) {
2355 struct hlist_head
*head
= &net
->ipv4
.fib_table_hash
[h
];
2356 hlist_for_each_entry_rcu(tb
, tb_node
, head
, tb_hlist
) {
2357 n
= fib_trie_get_first(iter
, (struct trie
*) tb
->tb_data
);
2369 static void fib_trie_seq_stop(struct seq_file
*seq
, void *v
)
2375 static void seq_indent(struct seq_file
*seq
, int n
)
2377 while (n
-- > 0) seq_puts(seq
, " ");
2380 static inline const char *rtn_scope(char *buf
, size_t len
, enum rt_scope_t s
)
2383 case RT_SCOPE_UNIVERSE
: return "universe";
2384 case RT_SCOPE_SITE
: return "site";
2385 case RT_SCOPE_LINK
: return "link";
2386 case RT_SCOPE_HOST
: return "host";
2387 case RT_SCOPE_NOWHERE
: return "nowhere";
2389 snprintf(buf
, len
, "scope=%d", s
);
2394 static const char *rtn_type_names
[__RTN_MAX
] = {
2395 [RTN_UNSPEC
] = "UNSPEC",
2396 [RTN_UNICAST
] = "UNICAST",
2397 [RTN_LOCAL
] = "LOCAL",
2398 [RTN_BROADCAST
] = "BROADCAST",
2399 [RTN_ANYCAST
] = "ANYCAST",
2400 [RTN_MULTICAST
] = "MULTICAST",
2401 [RTN_BLACKHOLE
] = "BLACKHOLE",
2402 [RTN_UNREACHABLE
] = "UNREACHABLE",
2403 [RTN_PROHIBIT
] = "PROHIBIT",
2404 [RTN_THROW
] = "THROW",
2406 [RTN_XRESOLVE
] = "XRESOLVE",
2409 static inline const char *rtn_type(char *buf
, size_t len
, unsigned t
)
2411 if (t
< __RTN_MAX
&& rtn_type_names
[t
])
2412 return rtn_type_names
[t
];
2413 snprintf(buf
, len
, "type %u", t
);
2417 /* Pretty print the trie */
2418 static int fib_trie_seq_show(struct seq_file
*seq
, void *v
)
2420 const struct fib_trie_iter
*iter
= seq
->private;
2423 if (!node_parent_rcu(n
))
2424 fib_table_print(seq
, iter
->tb
);
2427 struct tnode
*tn
= (struct tnode
*) n
;
2428 __be32 prf
= htonl(mask_pfx(tn
->key
, tn
->pos
));
2430 seq_indent(seq
, iter
->depth
-1);
2431 seq_printf(seq
, " +-- %pI4/%d %d %d %d\n",
2432 &prf
, tn
->pos
, tn
->bits
, tn
->full_children
,
2433 tn
->empty_children
);
2436 struct leaf
*l
= (struct leaf
*) n
;
2437 struct leaf_info
*li
;
2438 struct hlist_node
*node
;
2439 __be32 val
= htonl(l
->key
);
2441 seq_indent(seq
, iter
->depth
);
2442 seq_printf(seq
, " |-- %pI4\n", &val
);
2444 hlist_for_each_entry_rcu(li
, node
, &l
->list
, hlist
) {
2445 struct fib_alias
*fa
;
2447 list_for_each_entry_rcu(fa
, &li
->falh
, fa_list
) {
2448 char buf1
[32], buf2
[32];
2450 seq_indent(seq
, iter
->depth
+1);
2451 seq_printf(seq
, " /%d %s %s", li
->plen
,
2452 rtn_scope(buf1
, sizeof(buf1
),
2454 rtn_type(buf2
, sizeof(buf2
),
2457 seq_printf(seq
, " tos=%d", fa
->fa_tos
);
2458 seq_putc(seq
, '\n');
2466 static const struct seq_operations fib_trie_seq_ops
= {
2467 .start
= fib_trie_seq_start
,
2468 .next
= fib_trie_seq_next
,
2469 .stop
= fib_trie_seq_stop
,
2470 .show
= fib_trie_seq_show
,
2473 static int fib_trie_seq_open(struct inode
*inode
, struct file
*file
)
2475 return seq_open_net(inode
, file
, &fib_trie_seq_ops
,
2476 sizeof(struct fib_trie_iter
));
2479 static const struct file_operations fib_trie_fops
= {
2480 .owner
= THIS_MODULE
,
2481 .open
= fib_trie_seq_open
,
2483 .llseek
= seq_lseek
,
2484 .release
= seq_release_net
,
2487 struct fib_route_iter
{
2488 struct seq_net_private p
;
2489 struct trie
*main_trie
;
2494 static struct leaf
*fib_route_get_idx(struct fib_route_iter
*iter
, loff_t pos
)
2496 struct leaf
*l
= NULL
;
2497 struct trie
*t
= iter
->main_trie
;
2499 /* use cache location of last found key */
2500 if (iter
->pos
> 0 && pos
>= iter
->pos
&& (l
= fib_find_node(t
, iter
->key
)))
2504 l
= trie_firstleaf(t
);
2507 while (l
&& pos
-- > 0) {
2509 l
= trie_nextleaf(l
);
2513 iter
->key
= pos
; /* remember it */
2515 iter
->pos
= 0; /* forget it */
2520 static void *fib_route_seq_start(struct seq_file
*seq
, loff_t
*pos
)
2523 struct fib_route_iter
*iter
= seq
->private;
2524 struct fib_table
*tb
;
2527 tb
= fib_get_table(seq_file_net(seq
), RT_TABLE_MAIN
);
2531 iter
->main_trie
= (struct trie
*) tb
->tb_data
;
2533 return SEQ_START_TOKEN
;
2535 return fib_route_get_idx(iter
, *pos
- 1);
2538 static void *fib_route_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2540 struct fib_route_iter
*iter
= seq
->private;
2544 if (v
== SEQ_START_TOKEN
) {
2546 l
= trie_firstleaf(iter
->main_trie
);
2549 l
= trie_nextleaf(l
);
2559 static void fib_route_seq_stop(struct seq_file
*seq
, void *v
)
2565 static unsigned fib_flag_trans(int type
, __be32 mask
, const struct fib_info
*fi
)
2567 static unsigned type2flags
[RTN_MAX
+ 1] = {
2568 [7] = RTF_REJECT
, [8] = RTF_REJECT
,
2570 unsigned flags
= type2flags
[type
];
2572 if (fi
&& fi
->fib_nh
->nh_gw
)
2573 flags
|= RTF_GATEWAY
;
2574 if (mask
== htonl(0xFFFFFFFF))
2581 * This outputs /proc/net/route.
2582 * The format of the file is not supposed to be changed
2583 * and needs to be same as fib_hash output to avoid breaking
2586 static int fib_route_seq_show(struct seq_file
*seq
, void *v
)
2589 struct leaf_info
*li
;
2590 struct hlist_node
*node
;
2592 if (v
== SEQ_START_TOKEN
) {
2593 seq_printf(seq
, "%-127s\n", "Iface\tDestination\tGateway "
2594 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2599 hlist_for_each_entry_rcu(li
, node
, &l
->list
, hlist
) {
2600 struct fib_alias
*fa
;
2601 __be32 mask
, prefix
;
2603 mask
= inet_make_mask(li
->plen
);
2604 prefix
= htonl(l
->key
);
2606 list_for_each_entry_rcu(fa
, &li
->falh
, fa_list
) {
2607 const struct fib_info
*fi
= fa
->fa_info
;
2608 unsigned flags
= fib_flag_trans(fa
->fa_type
, mask
, fi
);
2611 if (fa
->fa_type
== RTN_BROADCAST
2612 || fa
->fa_type
== RTN_MULTICAST
)
2617 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
2618 "%d\t%08X\t%d\t%u\t%u%n",
2619 fi
->fib_dev
? fi
->fib_dev
->name
: "*",
2621 fi
->fib_nh
->nh_gw
, flags
, 0, 0,
2625 fi
->fib_advmss
+ 40 : 0),
2627 fi
->fib_rtt
>> 3, &len
);
2630 "*\t%08X\t%08X\t%04X\t%d\t%u\t"
2631 "%d\t%08X\t%d\t%u\t%u%n",
2632 prefix
, 0, flags
, 0, 0, 0,
2633 mask
, 0, 0, 0, &len
);
2635 seq_printf(seq
, "%*s\n", 127 - len
, "");
2642 static const struct seq_operations fib_route_seq_ops
= {
2643 .start
= fib_route_seq_start
,
2644 .next
= fib_route_seq_next
,
2645 .stop
= fib_route_seq_stop
,
2646 .show
= fib_route_seq_show
,
2649 static int fib_route_seq_open(struct inode
*inode
, struct file
*file
)
2651 return seq_open_net(inode
, file
, &fib_route_seq_ops
,
2652 sizeof(struct fib_route_iter
));
2655 static const struct file_operations fib_route_fops
= {
2656 .owner
= THIS_MODULE
,
2657 .open
= fib_route_seq_open
,
2659 .llseek
= seq_lseek
,
2660 .release
= seq_release_net
,
2663 int __net_init
fib_proc_init(struct net
*net
)
2665 if (!proc_net_fops_create(net
, "fib_trie", S_IRUGO
, &fib_trie_fops
))
2668 if (!proc_net_fops_create(net
, "fib_triestat", S_IRUGO
,
2669 &fib_triestat_fops
))
2672 if (!proc_net_fops_create(net
, "route", S_IRUGO
, &fib_route_fops
))
2678 proc_net_remove(net
, "fib_triestat");
2680 proc_net_remove(net
, "fib_trie");
2685 void __net_exit
fib_proc_exit(struct net
*net
)
2687 proc_net_remove(net
, "fib_trie");
2688 proc_net_remove(net
, "fib_triestat");
2689 proc_net_remove(net
, "route");
2692 #endif /* CONFIG_PROC_FS */