2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
26 * Code from fib_hash has been reused which includes the following header:
29 * INET An implementation of the TCP/IP protocol suite for the LINUX
30 * operating system. INET is implemented using the BSD Socket
31 * interface as the means of communication with the user level.
33 * IPv4 FIB: lookup engine and maintenance routines.
36 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
38 * This program is free software; you can redistribute it and/or
39 * modify it under the terms of the GNU General Public License
40 * as published by the Free Software Foundation; either version
41 * 2 of the License, or (at your option) any later version.
43 * Substantial contributions to this work come from:
45 * David S. Miller, <davem@davemloft.net>
46 * Stephen Hemminger <shemminger@osdl.org>
47 * Paul E. McKenney <paulmck@us.ibm.com>
48 * Patrick McHardy <kaber@trash.net>
51 #define VERSION "0.408"
53 #include <asm/uaccess.h>
54 #include <asm/system.h>
55 #include <linux/bitops.h>
56 #include <linux/types.h>
57 #include <linux/kernel.h>
59 #include <linux/string.h>
60 #include <linux/socket.h>
61 #include <linux/sockios.h>
62 #include <linux/errno.h>
64 #include <linux/inet.h>
65 #include <linux/inetdevice.h>
66 #include <linux/netdevice.h>
67 #include <linux/if_arp.h>
68 #include <linux/proc_fs.h>
69 #include <linux/rcupdate.h>
70 #include <linux/skbuff.h>
71 #include <linux/netlink.h>
72 #include <linux/init.h>
73 #include <linux/list.h>
74 #include <net/net_namespace.h>
76 #include <net/protocol.h>
77 #include <net/route.h>
80 #include <net/ip_fib.h>
81 #include "fib_lookup.h"
83 #define MAX_STAT_DEPTH 32
85 #define KEYLENGTH (8*sizeof(t_key))
87 typedef unsigned int t_key;
91 #define NODE_TYPE_MASK 0x1UL
92 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
94 #define IS_TNODE(n) (!(n->parent & T_LEAF))
95 #define IS_LEAF(n) (n->parent & T_LEAF)
103 unsigned long parent;
105 struct hlist_head list;
110 struct hlist_node hlist;
113 struct list_head falh;
117 unsigned long parent;
119 unsigned char pos;	/* 2log(KEYLENGTH) bits needed */
120 unsigned char bits;	/* 2log(KEYLENGTH) bits needed */
121 unsigned int full_children;	/* KEYLENGTH bits needed */
122 unsigned int empty_children;	/* KEYLENGTH bits needed */
125 struct work_struct work;
126 struct tnode *tnode_free;
128 struct node *child[0];
131 #ifdef CONFIG_IP_FIB_TRIE_STATS
132 struct trie_use_stats {
134 unsigned int backtrack;
135 unsigned int semantic_match_passed;
136 unsigned int semantic_match_miss;
137 unsigned int null_node_hit;
138 unsigned int resize_node_skipped;
143 unsigned int totdepth;
144 unsigned int maxdepth;
147 unsigned int nullpointers;
148 unsigned int prefixes;
149 unsigned int nodesizes[MAX_STAT_DEPTH];
154 #ifdef CONFIG_IP_FIB_TRIE_STATS
155 struct trie_use_stats stats;
159 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
160 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
162 static struct node *resize(struct trie *t, struct tnode *tn);
163 static struct tnode *inflate(struct trie *t, struct tnode *tn);
164 static struct tnode *halve(struct trie *t, struct tnode *tn);
165 /* tnodes to free after resize(); protected by RTNL */
166 static struct tnode *tnode_free_head;
167 static size_t tnode_free_size;
170 * synchronize_rcu after call_rcu for that many pages; it should be especially
171 * useful before resizing the root node with PREEMPT_NONE configs; the value was
172 * obtained experimentally, aiming to avoid visible slowdown.
174 static const int sync_pages = 128;
176 static struct kmem_cache *fn_alias_kmem __read_mostly;
177 static struct kmem_cache *trie_leaf_kmem __read_mostly;
179 static inline struct tnode *node_parent(struct node *node)
181 return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
184 static inline struct tnode *node_parent_rcu(struct node *node)
186 struct tnode *ret = node_parent(node);
188 return rcu_dereference(ret);
191 /* Same as rcu_assign_pointer
192 * but that macro() assumes that value is a pointer.
194 static inline void node_set_parent(struct node *node, struct tnode *ptr)
197 node->parent = (unsigned long)ptr | NODE_TYPE(node);
200 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
202 BUG_ON(i >= 1U << tn->bits);
207 static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
209 struct node *ret = tnode_get_child(tn, i);
211 return rcu_dereference(ret);
214 static inline int tnode_child_length(const struct tnode *tn)
216 return 1 << tn->bits;
219 static inline t_key mask_pfx(t_key k, unsigned short l)
221 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
224 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
226 if (offset < KEYLENGTH)
227 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
232 static inline int tkey_equals(t_key a, t_key b)
237 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
239 if (bits == 0 || offset >= KEYLENGTH)
241 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
242 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
245 static inline int tkey_mismatch(t_key a, int offset, t_key b)
252 while ((diff << i) >> (KEYLENGTH-1) == 0)
258 To understand this stuff, an understanding of keys and all their bits is
259 necessary. Every node in the trie has a key associated with it, but not
260 all of the bits in that key are significant.
262 Consider a node 'n' and its parent 'tp'.
264 If n is a leaf, every bit in its key is significant. Its presence is
265 necessitated by path compression, since during a tree traversal (when
266 searching for a leaf - unless we are doing an insertion) we will completely
267 ignore all skipped bits we encounter. Thus we need to verify, at the end of
268 a potentially successful search, that we have indeed been walking the
271 Note that we can never "miss" the correct key in the tree if present by
272 following the wrong path. Path compression ensures that segments of the key
273 that are the same for all keys with a given prefix are skipped, but the
274 skipped part *is* identical for each node in the subtrie below the skipped
275 bit! trie_insert() in this implementation takes care of that - note the
276 call to tkey_sub_equals() in trie_insert().
278 If n is an internal node - a 'tnode' here - the various parts of its key
279 have many different meanings.
282 _________________________________________________________________
283 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
284 -----------------------------------------------------------------
285 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
287 _________________________________________________________________
288 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
289 -----------------------------------------------------------------
290 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
297 First, let's just ignore the bits that come before the parent tp, that is
298 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
299 not use them for anything.
301 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
302 index into the parent's child array. That is, they will be used to find
303 'n' among tp's children.
305 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
308 All the bits we have seen so far are significant to the node n. The rest
309 of the bits are really not needed or indeed known in n->key.
311 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
312 n's child array, and will of course be different for each child.
315 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
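/*
 * Illustrative sketch (not part of the original file): how the "N"/"C"
 * index bits described above are pulled out of a key with
 * tkey_extract_bits().  The key, pos and bits values are made up for the
 * example, and the block is kept under #if 0 so it is never built.
 */
#if 0
static void tkey_extract_example(void)
{
	t_key key = 0xc0a80100;		/* 192.168.1.0, host byte order */
	int pos = 7, bits = 3;		/* pretend the parent has pos 7, bits 3 */

	/* (key << 7) >> (KEYLENGTH - 3): bits 7..9 counted from the MSB,
	 * which is 0b010 == 2 here, so this key would be found under
	 * parent->child[2].
	 */
	unsigned int cindex = tkey_extract_bits(key, pos, bits);
	(void) cindex;
}
#endif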
320 static inline void check_tnode(const struct tnode *tn)
322 WARN_ON(tn && tn->pos+tn->bits > 32);
325 static const int halve_threshold = 25;
326 static const int inflate_threshold = 50;
327 static const int halve_threshold_root = 15;
328 static const int inflate_threshold_root = 25;
330 static int inflate_threshold_root_fix;
331 #define INFLATE_FIX_MAX 10 /* a comment in resize() */
333 static void __alias_free_mem(struct rcu_head *head)
335 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
336 kmem_cache_free(fn_alias_kmem, fa);
339 static inline void alias_free_mem_rcu(struct fib_alias *fa)
341 call_rcu(&fa->rcu, __alias_free_mem);
344 static void __leaf_free_rcu(struct rcu_head *head)
346 struct leaf *l = container_of(head, struct leaf, rcu);
347 kmem_cache_free(trie_leaf_kmem, l);
350 static inline void free_leaf(struct leaf *l)
352 call_rcu_bh(&l->rcu, __leaf_free_rcu);
355 static void __leaf_info_free_rcu(struct rcu_head *head)
357 kfree(container_of(head, struct leaf_info, rcu));
360 static inline void free_leaf_info(struct leaf_info *leaf)
362 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
365 static struct tnode *tnode_alloc(size_t size)
367 if (size <= PAGE_SIZE)
368 return kzalloc(size, GFP_KERNEL);
370 return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
373 static void __tnode_vfree(struct work_struct *arg)
375 struct tnode *tn = container_of(arg, struct tnode, work);
379 static void __tnode_free_rcu(struct rcu_head *head)
381 struct tnode *tn = container_of(head, struct tnode, rcu);
382 size_t size = sizeof(struct tnode) + (sizeof(struct node *) << tn->bits);
385 if (size <= PAGE_SIZE)
388 INIT_WORK(&tn->work, __tnode_vfree);
389 schedule_work(&tn->work);
393 static inline void tnode_free(struct tnode *tn)
396 free_leaf((struct leaf *) tn);
398 call_rcu(&tn->rcu, __tnode_free_rcu);
401 static void tnode_free_safe(struct tnode *tn)
404 tn->tnode_free = tnode_free_head;
405 tnode_free_head = tn;
406 tnode_free_size += sizeof(struct tnode) + (sizeof(struct node *) << tn->bits);
410 static void tnode_free_flush(void)
414 while ((tn = tnode_free_head)) {
415 tnode_free_head = tn->tnode_free;
416 tn->tnode_free = NULL;
420 if (tnode_free_size >= PAGE_SIZE * sync_pages) {
426 static struct leaf *leaf_new(void)
428 struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
431 INIT_HLIST_HEAD(&l->list);
436 static struct leaf_info *leaf_info_new(int plen)
438 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
441 INIT_LIST_HEAD(&li->falh);
446 static struct tnode *tnode_new(t_key key, int pos, int bits)
448 size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
449 struct tnode *tn = tnode_alloc(sz);
452 tn->parent = T_TNODE;
456 tn->full_children = 0;
457 tn->empty_children = 1<<bits;
460 pr_debug("AT %p s=%u %lu\n", tn, (unsigned int) sizeof(struct tnode), (unsigned long) (sizeof(struct node) << bits));
466 * Check whether a tnode 'n' is "full", i.e. it is an internal node
467 * and no bits are skipped. See discussion in dyntree paper p. 6
470 static inline int tnode_full(const struct tnode *tn, const struct node *n)
472 if (n == NULL || IS_LEAF(n))
475 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
478 static inline void put_child(struct trie *t, struct tnode *tn, int i,
481 tnode_put_child_reorg(tn, i, n, -1);
485 * Add a child at position i overwriting the old value.
486 * Update the value of full_children and empty_children.
489 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
492 struct node *chi = tn->child[i];
495 BUG_ON(i >= 1<<tn->bits);
497 /* update emptyChildren */
498 if (n == NULL && chi != NULL)
499 tn->empty_children++;
500 else if (n != NULL && chi == NULL)
501 tn->empty_children--;
503 /* update fullChildren */
505 wasfull = tnode_full(tn, chi);
507 isfull = tnode_full(tn, n);
508 if (wasfull && !isfull)
510 else if (!wasfull && isfull)
514 node_set_parent(n, tn);
516 rcu_assign_pointer(tn->child[i], n);
519 static struct node *resize(struct trie *t, struct tnode *tn)
523 struct tnode *old_tn;
524 int inflate_threshold_use;
525 int halve_threshold_use;
531 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n", tn, inflate_threshold, halve_threshold);
535 if (tn->empty_children == tnode_child_length(tn)) {
540 if (tn->empty_children == tnode_child_length(tn) - 1)
541 for (i = 0; i < tnode_child_length(tn); i++) {
548 /* compress one level */
549 node_set_parent(n, NULL);
554 * Double as long as the resulting node has a number of
555 * nonempty nodes that are above the threshold.
559 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
560 * the Helsinki University of Technology and Matti Tikkanen of Nokia
561 * Telecommunications, page 6:
562 * "A node is doubled if the ratio of non-empty children to all
563 * children in the *doubled* node is at least 'high'."
565 * 'high' in this instance is the variable 'inflate_threshold'. It
566 * is expressed as a percentage, so we multiply it with
567 * tnode_child_length() and instead of multiplying by 2 (since the
568 * child array will be doubled by inflate()) and multiplying
569 * the left-hand side by 100 (to handle the percentage thing) we
570 * multiply the left-hand side by 50.
572 * The left-hand side may look a bit weird: tnode_child_length(tn)
573 * - tn->empty_children is of course the number of non-null children
574 * in the current node. tn->full_children is the number of "full"
575 * children, that is non-null tnodes with a skip value of 0.
576 * All of those will be doubled in the resulting inflated tnode, so
577 * we just count them one extra time here.
579 * A clearer way to write this would be:
581 * to_be_doubled = tn->full_children;
582 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
585 * new_child_length = tnode_child_length(tn) * 2;
587 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
589 * if (new_fill_factor >= inflate_threshold)
591 * ...and so on, tho it would mess up the while () loop.
594 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
598 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
599 * inflate_threshold * new_child_length
601 * expand not_to_be_doubled and to_be_doubled, and shorten:
602 * 100 * (tnode_child_length(tn) - tn->empty_children +
603 * tn->full_children) >= inflate_threshold * new_child_length
605 * expand new_child_length:
606 * 100 * (tnode_child_length(tn) - tn->empty_children +
607 * tn->full_children) >=
608 * inflate_threshold * tnode_child_length(tn) * 2
611 * 50 * (tn->full_children + tnode_child_length(tn) -
612 * tn->empty_children) >= inflate_threshold *
613 * tnode_child_length(tn)
619 /* Keep root node larger */
622 inflate_threshold_use
= inflate_threshold_root
+
623 inflate_threshold_root_fix
;
625 inflate_threshold_use
= inflate_threshold
;
629 while ((tn
->full_children
> 0 && max_resize
-- &&
630 50 * (tn
->full_children
+ tnode_child_length(tn
)
631 - tn
->empty_children
)
632 >= inflate_threshold_use
* tnode_child_length(tn
))) {
639 #ifdef CONFIG_IP_FIB_TRIE_STATS
640 t
->stats
.resize_node_skipped
++;
646 if (max_resize
< 0) {
649 * It was observed that during large updates even
650 * inflate_threshold_root = 35 might be needed to avoid
651 * this warning; but it should be temporary, so let's
652 * try to handle this automatically.
654 if (inflate_threshold_root_fix
< INFLATE_FIX_MAX
)
655 inflate_threshold_root_fix
++;
657 pr_warning("Fix inflate_threshold_root."
658 " Now=%d size=%d bits fix=%d\n",
659 inflate_threshold_root
, tn
->bits
,
660 inflate_threshold_root_fix
);
662 pr_warning("Fix inflate_threshold."
663 " Now=%d size=%d bits\n",
664 inflate_threshold
, tn
->bits
);
666 } else if (max_resize
> 3 && !tn
->parent
&& inflate_threshold_root_fix
)
667 inflate_threshold_root_fix
--;
672 * Halve as long as the number of empty children in this
673 * node is above threshold.
677 /* Keep root node larger */
680 halve_threshold_use
= halve_threshold_root
;
682 halve_threshold_use
= halve_threshold
;
686 while (tn
->bits
> 1 && max_resize
-- &&
687 100 * (tnode_child_length(tn
) - tn
->empty_children
) <
688 halve_threshold_use
* tnode_child_length(tn
)) {
694 #ifdef CONFIG_IP_FIB_TRIE_STATS
695 t
->stats
.resize_node_skipped
++;
701 if (max_resize
< 0) {
703 pr_warning("Fix halve_threshold_root."
704 " Now=%d size=%d bits\n",
705 halve_threshold_root
, tn
->bits
);
707 pr_warning("Fix halve_threshold."
708 " Now=%d size=%d bits\n",
709 halve_threshold
, tn
->bits
);
712 /* Only one child remains */
713 if (tn
->empty_children
== tnode_child_length(tn
) - 1)
714 for (i
= 0; i
< tnode_child_length(tn
); i
++) {
721 /* compress one level */
723 node_set_parent(n
, NULL
);
728 return (struct node
*) tn
;
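/*
 * Illustrative sketch (not part of the original file): the inflate and
 * halve conditions derived in the long comment inside resize() above,
 * rewritten as standalone predicates.  The helper names are made up for
 * the example, and the block is kept under #if 0 so it is never built.
 */
#if 0
static int should_inflate(const struct tnode *tn, int threshold)
{
	/* the doubled node would still have at least 'threshold' percent
	 * non-empty children; full children count twice because inflate()
	 * spreads each of them over two slots.
	 */
	return tn->full_children > 0 &&
	       50 * (tn->full_children + tnode_child_length(tn) -
		     tn->empty_children) >= threshold * tnode_child_length(tn);
}

static int should_halve(const struct tnode *tn, int threshold)
{
	/* fewer than 'threshold' percent of the children are non-empty */
	return tn->bits > 1 &&
	       100 * (tnode_child_length(tn) - tn->empty_children) <
	       threshold * tnode_child_length(tn);
}
#endif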
731 static struct tnode
*inflate(struct trie
*t
, struct tnode
*tn
)
733 struct tnode
*oldtnode
= tn
;
734 int olen
= tnode_child_length(tn
);
737 pr_debug("In inflate\n");
739 tn
= tnode_new(oldtnode
->key
, oldtnode
->pos
, oldtnode
->bits
+ 1);
742 return ERR_PTR(-ENOMEM
);
745 * Preallocate and store tnodes before the actual work so we
746 * don't get into an inconsistent state if memory allocation
747 * fails. In case of failure we return the oldnode and inflate
748 * of tnode is ignored.
751 for (i
= 0; i
< olen
; i
++) {
754 inode
= (struct tnode
*) tnode_get_child(oldtnode
, i
);
757 inode
->pos
== oldtnode
->pos
+ oldtnode
->bits
&&
759 struct tnode
*left
, *right
;
760 t_key m
= ~0U << (KEYLENGTH
- 1) >> inode
->pos
;
762 left
= tnode_new(inode
->key
&(~m
), inode
->pos
+ 1,
767 right
= tnode_new(inode
->key
|m
, inode
->pos
+ 1,
775 put_child(t
, tn
, 2*i
, (struct node
*) left
);
776 put_child(t
, tn
, 2*i
+1, (struct node
*) right
);
780 for (i
= 0; i
< olen
; i
++) {
782 struct node
*node
= tnode_get_child(oldtnode
, i
);
783 struct tnode
*left
, *right
;
790 /* A leaf or an internal node with skipped bits */
792 if (IS_LEAF(node
) || ((struct tnode
*) node
)->pos
>
793 tn
->pos
+ tn
->bits
- 1) {
794 if (tkey_extract_bits(node
->key
,
795 oldtnode
->pos
+ oldtnode
->bits
,
797 put_child(t
, tn
, 2*i
, node
);
799 put_child(t
, tn
, 2*i
+1, node
);
803 /* An internal node with two children */
804 inode
= (struct tnode
*) node
;
806 if (inode
->bits
== 1) {
807 put_child(t
, tn
, 2*i
, inode
->child
[0]);
808 put_child(t
, tn
, 2*i
+1, inode
->child
[1]);
810 tnode_free_safe(inode
);
814 /* An internal node with more than two children */
816 /* We will replace this node 'inode' with two new
817 * ones, 'left' and 'right', each with half of the
818 * original children. The two new nodes will have
819 * a position one bit further down the key and this
820 * means that the "significant" part of their keys
821 * (see the discussion near the top of this file)
822 * will differ by one bit, which will be "0" in
823 * left's key and "1" in right's key. Since we are
824 * moving the key position by one step, the bit that
825 * we are moving away from - the bit at position
826 * (inode->pos) - is the one that will differ between
827 * left and right. So... we synthesize that bit in the
829 * The mask 'm' below will be a single "one" bit at
830 * the position (inode->pos)
833 /* Use the old key, but set the new significant
837 left
= (struct tnode
*) tnode_get_child(tn
, 2*i
);
838 put_child(t
, tn
, 2*i
, NULL
);
842 right
= (struct tnode
*) tnode_get_child(tn
, 2*i
+1);
843 put_child(t
, tn
, 2*i
+1, NULL
);
847 size
= tnode_child_length(left
);
848 for (j
= 0; j
< size
; j
++) {
849 put_child(t
, left
, j
, inode
->child
[j
]);
850 put_child(t
, right
, j
, inode
->child
[j
+ size
]);
852 put_child(t
, tn
, 2*i
, resize(t
, left
));
853 put_child(t
, tn
, 2*i
+1, resize(t
, right
));
855 tnode_free_safe(inode
);
857 tnode_free_safe(oldtnode
);
861 int size
= tnode_child_length(tn
);
864 for (j
= 0; j
< size
; j
++)
866 tnode_free((struct tnode
*)tn
->child
[j
]);
870 return ERR_PTR(-ENOMEM
);
874 static struct tnode
*halve(struct trie
*t
, struct tnode
*tn
)
876 struct tnode
*oldtnode
= tn
;
877 struct node
*left
, *right
;
879 int olen
= tnode_child_length(tn
);
881 pr_debug("In halve\n");
883 tn
= tnode_new(oldtnode
->key
, oldtnode
->pos
, oldtnode
->bits
- 1);
886 return ERR_PTR(-ENOMEM
);
889 * Preallocate and store tnodes before the actual work so we
890 * don't get into an inconsistent state if memory allocation
891 * fails. In case of failure we return the oldnode and halve
892 * of tnode is ignored.
895 for (i
= 0; i
< olen
; i
+= 2) {
896 left
= tnode_get_child(oldtnode
, i
);
897 right
= tnode_get_child(oldtnode
, i
+1);
899 /* Two nonempty children */
903 newn
= tnode_new(left
->key
, tn
->pos
+ tn
->bits
, 1);
908 put_child(t
, tn
, i
/2, (struct node
*)newn
);
913 for (i
= 0; i
< olen
; i
+= 2) {
914 struct tnode
*newBinNode
;
916 left
= tnode_get_child(oldtnode
, i
);
917 right
= tnode_get_child(oldtnode
, i
+1);
919 /* At least one of the children is empty */
921 if (right
== NULL
) /* Both are empty */
923 put_child(t
, tn
, i
/2, right
);
928 put_child(t
, tn
, i
/2, left
);
932 /* Two nonempty children */
933 newBinNode
= (struct tnode
*) tnode_get_child(tn
, i
/2);
934 put_child(t
, tn
, i
/2, NULL
);
935 put_child(t
, newBinNode
, 0, left
);
936 put_child(t
, newBinNode
, 1, right
);
937 put_child(t
, tn
, i
/2, resize(t
, newBinNode
));
939 tnode_free_safe(oldtnode
);
943 int size
= tnode_child_length(tn
);
946 for (j
= 0; j
< size
; j
++)
948 tnode_free((struct tnode
*)tn
->child
[j
]);
952 return ERR_PTR(-ENOMEM
);
956 /* readside must use rcu_read_lock; currently only the dump routines
957 (via get_fa_head and dump) reach this */
959 static struct leaf_info
*find_leaf_info(struct leaf
*l
, int plen
)
961 struct hlist_head
*head
= &l
->list
;
962 struct hlist_node
*node
;
963 struct leaf_info
*li
;
965 hlist_for_each_entry_rcu(li
, node
, head
, hlist
)
966 if (li
->plen
== plen
)
972 static inline struct list_head
*get_fa_head(struct leaf
*l
, int plen
)
974 struct leaf_info
*li
= find_leaf_info(l
, plen
);
982 static void insert_leaf_info(struct hlist_head
*head
, struct leaf_info
*new)
984 struct leaf_info
*li
= NULL
, *last
= NULL
;
985 struct hlist_node
*node
;
987 if (hlist_empty(head
)) {
988 hlist_add_head_rcu(&new->hlist
, head
);
990 hlist_for_each_entry(li
, node
, head
, hlist
) {
991 if (new->plen
> li
->plen
)
997 hlist_add_after_rcu(&last
->hlist
, &new->hlist
);
999 hlist_add_before_rcu(&new->hlist
, &li
->hlist
);
1003 /* rcu_read_lock needs to be held by caller from readside */
1005 static struct leaf
*
1006 fib_find_node(struct trie
*t
, u32 key
)
1013 n
= rcu_dereference(t
->trie
);
1015 while (n
!= NULL
&& NODE_TYPE(n
) == T_TNODE
) {
1016 tn
= (struct tnode
*) n
;
1020 if (tkey_sub_equals(tn
->key
, pos
, tn
->pos
-pos
, key
)) {
1021 pos
= tn
->pos
+ tn
->bits
;
1022 n
= tnode_get_child_rcu(tn
,
1023 tkey_extract_bits(key
,
1029 /* Case we have found a leaf. Compare prefixes */
1031 if (n
!= NULL
&& IS_LEAF(n
) && tkey_equals(key
, n
->key
))
1032 return (struct leaf
*)n
;
1037 static void trie_rebalance(struct trie
*t
, struct tnode
*tn
)
1045 while (tn
!= NULL
&& (tp
= node_parent((struct node
*)tn
)) != NULL
) {
1046 cindex
= tkey_extract_bits(key
, tp
->pos
, tp
->bits
);
1047 wasfull
= tnode_full(tp
, tnode_get_child(tp
, cindex
));
1048 tn
= (struct tnode
*) resize(t
, (struct tnode
*)tn
);
1050 tnode_put_child_reorg((struct tnode
*)tp
, cindex
,
1051 (struct node
*)tn
, wasfull
);
1053 tp
= node_parent((struct node
*) tn
);
1055 rcu_assign_pointer(t
->trie
, (struct node
*)tn
);
1063 /* Handle last (top) tnode */
1065 tn
= (struct tnode
*)resize(t
, (struct tnode
*)tn
);
1067 rcu_assign_pointer(t
->trie
, (struct node
*)tn
);
1073 /* only used from updater-side */
1075 static struct list_head
*fib_insert_node(struct trie
*t
, u32 key
, int plen
)
1078 struct tnode
*tp
= NULL
, *tn
= NULL
;
1082 struct list_head
*fa_head
= NULL
;
1083 struct leaf_info
*li
;
1089 /* If we point to NULL, stop. Either the tree is empty and we should
1090 * just put a new leaf in it, or we have reached an empty child slot,
1091 * and we should just put our new leaf in that.
1092 * If we point to a T_TNODE, check if it matches our key. Note that
1093 * a T_TNODE might be skipping any number of bits - its 'pos' need
1094 * not be the parent's 'pos'+'bits'!
1096 * If it does match the current key, get pos/bits from it, extract
1097 * the index from our key, push the T_TNODE and walk the tree.
1099 * If it doesn't, we have to replace it with a new T_TNODE.
1101 * If we point to a T_LEAF, it might or might not have the same key
1102 * as we do. If it does, just change the value, update the T_LEAF's
1103 * value, and return it.
1104 * If it doesn't, we need to replace it with a T_TNODE.
1107 while (n
!= NULL
&& NODE_TYPE(n
) == T_TNODE
) {
1108 tn
= (struct tnode
*) n
;
1112 if (tkey_sub_equals(tn
->key
, pos
, tn
->pos
-pos
, key
)) {
1114 pos
= tn
->pos
+ tn
->bits
;
1115 n
= tnode_get_child(tn
,
1116 tkey_extract_bits(key
,
1120 BUG_ON(n
&& node_parent(n
) != tn
);
1126 * n ----> NULL, LEAF or TNODE
1128 * tp is n's (parent) ----> NULL or TNODE
1131 BUG_ON(tp
&& IS_LEAF(tp
));
1133 /* Case 1: n is a leaf. Compare prefixes */
1135 if (n
!= NULL
&& IS_LEAF(n
) && tkey_equals(key
, n
->key
)) {
1136 l
= (struct leaf
*) n
;
1137 li
= leaf_info_new(plen
);
1142 fa_head
= &li
->falh
;
1143 insert_leaf_info(&l
->list
, li
);
1152 li
= leaf_info_new(plen
);
1159 fa_head
= &li
->falh
;
1160 insert_leaf_info(&l
->list
, li
);
1162 if (t
->trie
&& n
== NULL
) {
1163 /* Case 2: n is NULL, and will just insert a new leaf */
1165 node_set_parent((struct node
*)l
, tp
);
1167 cindex
= tkey_extract_bits(key
, tp
->pos
, tp
->bits
);
1168 put_child(t
, (struct tnode
*)tp
, cindex
, (struct node
*)l
);
1170 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1172 * Add a new tnode here
1173 * first tnode needs some special handling
1177 pos
= tp
->pos
+tp
->bits
;
1182 newpos
= tkey_mismatch(key
, pos
, n
->key
);
1183 tn
= tnode_new(n
->key
, newpos
, 1);
1186 tn
= tnode_new(key
, newpos
, 1); /* First tnode */
1195 node_set_parent((struct node
*)tn
, tp
);
1197 missbit
= tkey_extract_bits(key
, newpos
, 1);
1198 put_child(t
, tn
, missbit
, (struct node
*)l
);
1199 put_child(t
, tn
, 1-missbit
, n
);
1202 cindex
= tkey_extract_bits(key
, tp
->pos
, tp
->bits
);
1203 put_child(t
, (struct tnode
*)tp
, cindex
,
1206 rcu_assign_pointer(t
->trie
, (struct node
*)tn
);
1211 if (tp
&& tp
->pos
+ tp
->bits
> 32)
1212 pr_warning("fib_trie"
1213 " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1214 tp
, tp
->pos
, tp
->bits
, key
, plen
);
1216 /* Rebalance the trie */
1218 trie_rebalance(t
, tp
);
1224 * Caller must hold RTNL.
1226 static int fn_trie_insert(struct fib_table
*tb
, struct fib_config
*cfg
)
1228 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1229 struct fib_alias
*fa
, *new_fa
;
1230 struct list_head
*fa_head
= NULL
;
1231 struct fib_info
*fi
;
1232 int plen
= cfg
->fc_dst_len
;
1233 u8 tos
= cfg
->fc_tos
;
1241 key
= ntohl(cfg
->fc_dst
);
1243 pr_debug("Insert table=%u %08x/%d\n", tb
->tb_id
, key
, plen
);
1245 mask
= ntohl(inet_make_mask(plen
));
1252 fi
= fib_create_info(cfg
);
1258 l
= fib_find_node(t
, key
);
1262 fa_head
= get_fa_head(l
, plen
);
1263 fa
= fib_find_alias(fa_head
, tos
, fi
->fib_priority
);
1266 /* Now fa, if non-NULL, points to the first fib alias
1267 * with the same keys [prefix,tos,priority], if such a key already
1268 * exists, or to the node before which we will insert the new one.
1270 * If fa is NULL, we will need to allocate a new one and
1271 * insert to the head of f.
1273 * If f is NULL, no fib node matched the destination key
1274 * and we need to allocate a new one of those as well.
1277 if (fa
&& fa
->fa_tos
== tos
&&
1278 fa
->fa_info
->fib_priority
== fi
->fib_priority
) {
1279 struct fib_alias
*fa_first
, *fa_match
;
1282 if (cfg
->fc_nlflags
& NLM_F_EXCL
)
1286 * 1. Find exact match for type, scope, fib_info to avoid
1288 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1292 fa
= list_entry(fa
->fa_list
.prev
, struct fib_alias
, fa_list
);
1293 list_for_each_entry_continue(fa
, fa_head
, fa_list
) {
1294 if (fa
->fa_tos
!= tos
)
1296 if (fa
->fa_info
->fib_priority
!= fi
->fib_priority
)
1298 if (fa
->fa_type
== cfg
->fc_type
&&
1299 fa
->fa_scope
== cfg
->fc_scope
&&
1300 fa
->fa_info
== fi
) {
1306 if (cfg
->fc_nlflags
& NLM_F_REPLACE
) {
1307 struct fib_info
*fi_drop
;
1317 new_fa
= kmem_cache_alloc(fn_alias_kmem
, GFP_KERNEL
);
1321 fi_drop
= fa
->fa_info
;
1322 new_fa
->fa_tos
= fa
->fa_tos
;
1323 new_fa
->fa_info
= fi
;
1324 new_fa
->fa_type
= cfg
->fc_type
;
1325 new_fa
->fa_scope
= cfg
->fc_scope
;
1326 state
= fa
->fa_state
;
1327 new_fa
->fa_state
= state
& ~FA_S_ACCESSED
;
1329 list_replace_rcu(&fa
->fa_list
, &new_fa
->fa_list
);
1330 alias_free_mem_rcu(fa
);
1332 fib_release_info(fi_drop
);
1333 if (state
& FA_S_ACCESSED
)
1334 rt_cache_flush(cfg
->fc_nlinfo
.nl_net
, -1);
1335 rtmsg_fib(RTM_NEWROUTE
, htonl(key
), new_fa
, plen
,
1336 tb
->tb_id
, &cfg
->fc_nlinfo
, NLM_F_REPLACE
);
1340 /* Error if we find a perfect match which
1341 * uses the same scope, type, and nexthop
1347 if (!(cfg
->fc_nlflags
& NLM_F_APPEND
))
1351 if (!(cfg
->fc_nlflags
& NLM_F_CREATE
))
1355 new_fa
= kmem_cache_alloc(fn_alias_kmem
, GFP_KERNEL
);
1359 new_fa
->fa_info
= fi
;
1360 new_fa
->fa_tos
= tos
;
1361 new_fa
->fa_type
= cfg
->fc_type
;
1362 new_fa
->fa_scope
= cfg
->fc_scope
;
1363 new_fa
->fa_state
= 0;
1365 * Insert new entry to the list.
1369 fa_head
= fib_insert_node(t
, key
, plen
);
1370 if (unlikely(!fa_head
)) {
1372 goto out_free_new_fa
;
1376 list_add_tail_rcu(&new_fa
->fa_list
,
1377 (fa
? &fa
->fa_list
: fa_head
));
1379 rt_cache_flush(cfg
->fc_nlinfo
.nl_net
, -1);
1380 rtmsg_fib(RTM_NEWROUTE
, htonl(key
), new_fa
, plen
, tb
->tb_id
,
1381 &cfg
->fc_nlinfo
, 0);
1386 kmem_cache_free(fn_alias_kmem
, new_fa
);
1388 fib_release_info(fi
);
1393 /* should be called with rcu_read_lock */
1394 static int check_leaf(struct trie
*t
, struct leaf
*l
,
1395 t_key key
, const struct flowi
*flp
,
1396 struct fib_result
*res
)
1398 struct leaf_info
*li
;
1399 struct hlist_head
*hhead
= &l
->list
;
1400 struct hlist_node
*node
;
1402 hlist_for_each_entry_rcu(li
, node
, hhead
, hlist
) {
1404 int plen
= li
->plen
;
1405 __be32 mask
= inet_make_mask(plen
);
1407 if (l
->key
!= (key
& ntohl(mask
)))
1410 err
= fib_semantic_match(&li
->falh
, flp
, res
, plen
);
1412 #ifdef CONFIG_IP_FIB_TRIE_STATS
1414 t
->stats
.semantic_match_passed
++;
1416 t
->stats
.semantic_match_miss
++;
1425 static int fn_trie_lookup(struct fib_table
*tb
, const struct flowi
*flp
,
1426 struct fib_result
*res
)
1428 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1433 t_key key
= ntohl(flp
->fl4_dst
);
1436 int current_prefix_length
= KEYLENGTH
;
1438 t_key node_prefix
, key_prefix
, pref_mismatch
;
1443 n
= rcu_dereference(t
->trie
);
1447 #ifdef CONFIG_IP_FIB_TRIE_STATS
1453 ret
= check_leaf(t
, (struct leaf
*)n
, key
, flp
, res
);
1457 pn
= (struct tnode
*) n
;
1465 cindex
= tkey_extract_bits(mask_pfx(key
, current_prefix_length
),
1468 n
= tnode_get_child_rcu(pn
, cindex
);
1471 #ifdef CONFIG_IP_FIB_TRIE_STATS
1472 t
->stats
.null_node_hit
++;
1478 ret
= check_leaf(t
, (struct leaf
*)n
, key
, flp
, res
);
1484 cn
= (struct tnode
*)n
;
1487 * It's a tnode, and we can do some extra checks here if we
1488 * like, to avoid descending into a dead-end branch.
1489 * This tnode is in the parent's child array at index
1490 * key[p_pos..p_pos+p_bits] but potentially with some bits
1491 * chopped off, so in reality the index may be just a
1492 * subprefix, padded with zero at the end.
1493 * We can also take a look at any skipped bits in this
1494 * tnode - everything up to p_pos is supposed to be ok,
1495 * and the non-chopped bits of the index (see previous
1496 * paragraph) are also guaranteed ok, but the rest is
1497 * considered unknown.
1499 * The skipped bits are key[pos+bits..cn->pos].
1502 /* If current_prefix_length < pos+bits, we are already doing
1503 * actual prefix matching, which means everything from
1504 * pos+(bits-chopped_off) onward must be zero along some
1505 * branch of this subtree - otherwise there is *no* valid
1506 * prefix present. Here we can only check the skipped
1507 * bits. Remember, since we have already indexed into the
1508 * parent's child array, we know that the bits we chopped off
1512 /* NOTA BENE: Checking only skipped bits
1513 for the new node here */
1515 if (current_prefix_length
< pos
+bits
) {
1516 if (tkey_extract_bits(cn
->key
, current_prefix_length
,
1517 cn
->pos
- current_prefix_length
)
1523 * If chopped_off=0, the index is fully validated and we
1524 * only need to look at the skipped bits for this, the new,
1525 * tnode. What we actually want to do is to find out if
1526 * these skipped bits match our key perfectly, or if we will
1527 * have to count on finding a matching prefix further down,
1528 * because if we do, we would like to have some way of
1529 * verifying the existence of such a prefix at this point.
1532 /* The only thing we can do at this point is to verify that
1533 * any such matching prefix can indeed be a prefix to our
1534 * key, and if the bits in the node we are inspecting that
1535 * do not match our key are not ZERO, this cannot be true.
1536 * Thus, find out where there is a mismatch (before cn->pos)
1537 * and verify that all the mismatching bits are zero in the
1542 * Note: We aren't very concerned about the piece of
1543 * the key that precedes pn->pos+pn->bits, since these
1544 * have already been checked. The bits after cn->pos
1545 * aren't checked since these are by definition
1546 * "unknown" at this point. Thus, what we want to see
1547 * is if we are about to enter the "prefix matching"
1548 * state, and in that case verify that the skipped
1549 * bits that will prevail throughout this subtree are
1550 * zero, as they have to be if we are to find a
1554 node_prefix
= mask_pfx(cn
->key
, cn
->pos
);
1555 key_prefix
= mask_pfx(key
, cn
->pos
);
1556 pref_mismatch
= key_prefix
^node_prefix
;
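/* Worked example (not in the original source): suppose cn->pos == 8 and
 * cn->key == 0x0a000000 (10.0.0.0).  For key == 0x0a800000 both masked
 * prefixes are 0x0a000000, so pref_mismatch == 0 and the skipped bits
 * match.  For key == 0x0b000000 the prefixes differ at bit 7 (counted
 * from the most significant bit), pref_mismatch != 0, and the loop below
 * locates that bit and drops into prefix matching.
 */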
1560 * In short: If skipped bits in this node do not match
1561 * the search key, enter the "prefix matching"
1564 if (pref_mismatch
) {
1565 while (!(pref_mismatch
& (1<<(KEYLENGTH
-1)))) {
1567 pref_mismatch
= pref_mismatch
<< 1;
1569 key_prefix
= tkey_extract_bits(cn
->key
, mp
, cn
->pos
-mp
);
1571 if (key_prefix
!= 0)
1574 if (current_prefix_length
>= cn
->pos
)
1575 current_prefix_length
= mp
;
1578 pn
= (struct tnode
*)n
; /* Descend */
1585 /* As zero don't change the child key (cindex) */
1586 while ((chopped_off
<= pn
->bits
)
1587 && !(cindex
& (1<<(chopped_off
-1))))
1590 /* Decrease current_... with bits chopped off */
1591 if (current_prefix_length
> pn
->pos
+ pn
->bits
- chopped_off
)
1592 current_prefix_length
= pn
->pos
+ pn
->bits
1596 * Either we do the actual chop off accordingly, or, if we have
1597 * chopped off all bits in this tnode, walk up to our parent.
1600 if (chopped_off
<= pn
->bits
) {
1601 cindex
&= ~(1 << (chopped_off
-1));
1603 struct tnode
*parent
= node_parent_rcu((struct node
*) pn
);
1607 /* Get Child's index */
1608 cindex
= tkey_extract_bits(pn
->key
, parent
->pos
, parent
->bits
);
1612 #ifdef CONFIG_IP_FIB_TRIE_STATS
1613 t
->stats
.backtrack
++;
1626 * Remove the leaf and return parent.
1628 static void trie_leaf_remove(struct trie
*t
, struct leaf
*l
)
1630 struct tnode
*tp
= node_parent((struct node
*) l
);
1632 pr_debug("entering trie_leaf_remove(%p)\n", l
);
1635 t_key cindex
= tkey_extract_bits(l
->key
, tp
->pos
, tp
->bits
);
1636 put_child(t
, (struct tnode
*)tp
, cindex
, NULL
);
1637 trie_rebalance(t
, tp
);
1639 rcu_assign_pointer(t
->trie
, NULL
);
1645 * Caller must hold RTNL.
1647 static int fn_trie_delete(struct fib_table
*tb
, struct fib_config
*cfg
)
1649 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1651 int plen
= cfg
->fc_dst_len
;
1652 u8 tos
= cfg
->fc_tos
;
1653 struct fib_alias
*fa
, *fa_to_delete
;
1654 struct list_head
*fa_head
;
1656 struct leaf_info
*li
;
1661 key
= ntohl(cfg
->fc_dst
);
1662 mask
= ntohl(inet_make_mask(plen
));
1668 l
= fib_find_node(t
, key
);
1673 fa_head
= get_fa_head(l
, plen
);
1674 fa
= fib_find_alias(fa_head
, tos
, 0);
1679 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key
, plen
, tos
, t
);
1681 fa_to_delete
= NULL
;
1682 fa
= list_entry(fa
->fa_list
.prev
, struct fib_alias
, fa_list
);
1683 list_for_each_entry_continue(fa
, fa_head
, fa_list
) {
1684 struct fib_info
*fi
= fa
->fa_info
;
1686 if (fa
->fa_tos
!= tos
)
1689 if ((!cfg
->fc_type
|| fa
->fa_type
== cfg
->fc_type
) &&
1690 (cfg
->fc_scope
== RT_SCOPE_NOWHERE
||
1691 fa
->fa_scope
== cfg
->fc_scope
) &&
1692 (!cfg
->fc_protocol
||
1693 fi
->fib_protocol
== cfg
->fc_protocol
) &&
1694 fib_nh_match(cfg
, fi
) == 0) {
1704 rtmsg_fib(RTM_DELROUTE
, htonl(key
), fa
, plen
, tb
->tb_id
,
1705 &cfg
->fc_nlinfo
, 0);
1707 l
= fib_find_node(t
, key
);
1708 li
= find_leaf_info(l
, plen
);
1710 list_del_rcu(&fa
->fa_list
);
1712 if (list_empty(fa_head
)) {
1713 hlist_del_rcu(&li
->hlist
);
1717 if (hlist_empty(&l
->list
))
1718 trie_leaf_remove(t
, l
);
1720 if (fa
->fa_state
& FA_S_ACCESSED
)
1721 rt_cache_flush(cfg
->fc_nlinfo
.nl_net
, -1);
1723 fib_release_info(fa
->fa_info
);
1724 alias_free_mem_rcu(fa
);
1728 static int trie_flush_list(struct list_head
*head
)
1730 struct fib_alias
*fa
, *fa_node
;
1733 list_for_each_entry_safe(fa
, fa_node
, head
, fa_list
) {
1734 struct fib_info
*fi
= fa
->fa_info
;
1736 if (fi
&& (fi
->fib_flags
& RTNH_F_DEAD
)) {
1737 list_del_rcu(&fa
->fa_list
);
1738 fib_release_info(fa
->fa_info
);
1739 alias_free_mem_rcu(fa
);
1746 static int trie_flush_leaf(struct leaf
*l
)
1749 struct hlist_head
*lih
= &l
->list
;
1750 struct hlist_node
*node
, *tmp
;
1751 struct leaf_info
*li
= NULL
;
1753 hlist_for_each_entry_safe(li
, node
, tmp
, lih
, hlist
) {
1754 found
+= trie_flush_list(&li
->falh
);
1756 if (list_empty(&li
->falh
)) {
1757 hlist_del_rcu(&li
->hlist
);
1765 * Scan for the next right leaf starting at node p->child[idx]
1766 * Since we have a back pointer, no recursion is necessary.
1768 static struct leaf
*leaf_walk_rcu(struct tnode
*p
, struct node
*c
)
1774 idx
= tkey_extract_bits(c
->key
, p
->pos
, p
->bits
) + 1;
1778 while (idx
< 1u << p
->bits
) {
1779 c
= tnode_get_child_rcu(p
, idx
++);
1784 prefetch(p
->child
[idx
]);
1785 return (struct leaf
*) c
;
1788 /* Rescan: start scanning in the new node */
1789 p
= (struct tnode
*) c
;
1793 /* Node empty, walk back up to parent */
1794 c
= (struct node
*) p
;
1795 } while ( (p
= node_parent_rcu(c
)) != NULL
);
1797 return NULL
; /* Root of trie */
1800 static struct leaf
*trie_firstleaf(struct trie
*t
)
1802 struct tnode
*n
= (struct tnode
*) rcu_dereference(t
->trie
);
1807 if (IS_LEAF(n
)) /* trie is just a leaf */
1808 return (struct leaf
*) n
;
1810 return leaf_walk_rcu(n
, NULL
);
1813 static struct leaf
*trie_nextleaf(struct leaf
*l
)
1815 struct node
*c
= (struct node
*) l
;
1816 struct tnode
*p
= node_parent_rcu(c
);
1819 return NULL
; /* trie with just one leaf */
1821 return leaf_walk_rcu(p
, c
);
1824 static struct leaf
*trie_leafindex(struct trie
*t
, int index
)
1826 struct leaf
*l
= trie_firstleaf(t
);
1828 while (l
&& index
-- > 0)
1829 l
= trie_nextleaf(l
);
1836 * Caller must hold RTNL.
1838 static int fn_trie_flush(struct fib_table
*tb
)
1840 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1841 struct leaf
*l
, *ll
= NULL
;
1844 for (l
= trie_firstleaf(t
); l
; l
= trie_nextleaf(l
)) {
1845 found
+= trie_flush_leaf(l
);
1847 if (ll
&& hlist_empty(&ll
->list
))
1848 trie_leaf_remove(t
, ll
);
1852 if (ll
&& hlist_empty(&ll
->list
))
1853 trie_leaf_remove(t
, ll
);
1855 pr_debug("trie_flush found=%d\n", found
);
1859 static void fn_trie_select_default(struct fib_table
*tb
,
1860 const struct flowi
*flp
,
1861 struct fib_result
*res
)
1863 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1864 int order
, last_idx
;
1865 struct fib_info
*fi
= NULL
;
1866 struct fib_info
*last_resort
;
1867 struct fib_alias
*fa
= NULL
;
1868 struct list_head
*fa_head
;
1877 l
= fib_find_node(t
, 0);
1881 fa_head
= get_fa_head(l
, 0);
1885 if (list_empty(fa_head
))
1888 list_for_each_entry_rcu(fa
, fa_head
, fa_list
) {
1889 struct fib_info
*next_fi
= fa
->fa_info
;
1891 if (fa
->fa_scope
!= res
->scope
||
1892 fa
->fa_type
!= RTN_UNICAST
)
1895 if (next_fi
->fib_priority
> res
->fi
->fib_priority
)
1897 if (!next_fi
->fib_nh
[0].nh_gw
||
1898 next_fi
->fib_nh
[0].nh_scope
!= RT_SCOPE_LINK
)
1900 fa
->fa_state
|= FA_S_ACCESSED
;
1903 if (next_fi
!= res
->fi
)
1905 } else if (!fib_detect_death(fi
, order
, &last_resort
,
1906 &last_idx
, tb
->tb_default
)) {
1907 fib_result_assign(res
, fi
);
1908 tb
->tb_default
= order
;
1914 if (order
<= 0 || fi
== NULL
) {
1915 tb
->tb_default
= -1;
1919 if (!fib_detect_death(fi
, order
, &last_resort
, &last_idx
,
1921 fib_result_assign(res
, fi
);
1922 tb
->tb_default
= order
;
1926 fib_result_assign(res
, last_resort
);
1927 tb
->tb_default
= last_idx
;
1932 static int fn_trie_dump_fa(t_key key
, int plen
, struct list_head
*fah
,
1933 struct fib_table
*tb
,
1934 struct sk_buff
*skb
, struct netlink_callback
*cb
)
1937 struct fib_alias
*fa
;
1938 __be32 xkey
= htonl(key
);
1943 /* rcu_read_lock is held by caller */
1945 list_for_each_entry_rcu(fa
, fah
, fa_list
) {
1951 if (fib_dump_info(skb
, NETLINK_CB(cb
->skb
).pid
,
1960 fa
->fa_info
, NLM_F_MULTI
) < 0) {
1970 static int fn_trie_dump_leaf(struct leaf
*l
, struct fib_table
*tb
,
1971 struct sk_buff
*skb
, struct netlink_callback
*cb
)
1973 struct leaf_info
*li
;
1974 struct hlist_node
*node
;
1980 /* rcu_read_lock is held by caller */
1981 hlist_for_each_entry_rcu(li
, node
, &l
->list
, hlist
) {
1990 if (list_empty(&li
->falh
))
1993 if (fn_trie_dump_fa(l
->key
, li
->plen
, &li
->falh
, tb
, skb
, cb
) < 0) {
2004 static int fn_trie_dump(struct fib_table
*tb
, struct sk_buff
*skb
,
2005 struct netlink_callback
*cb
)
2008 struct trie
*t
= (struct trie
*) tb
->tb_data
;
2009 t_key key
= cb
->args
[2];
2010 int count
= cb
->args
[3];
2013 /* Dump starting at last key.
2014 * Note: 0.0.0.0/0 (ie default) is first key.
2017 l
= trie_firstleaf(t
);
2019 /* Normally, continue from last key, but if that is missing
2020 * fall back to using a slow rescan
2022 l
= fib_find_node(t
, key
);
2024 l
= trie_leafindex(t
, count
);
2028 cb
->args
[2] = l
->key
;
2029 if (fn_trie_dump_leaf(l
, tb
, skb
, cb
) < 0) {
2030 cb
->args
[3] = count
;
2036 l
= trie_nextleaf(l
);
2037 memset(&cb
->args
[4], 0,
2038 sizeof(cb
->args
) - 4*sizeof(cb
->args
[0]));
2040 cb
->args
[3] = count
;
2046 void __init fib_hash_init(void)
2048 fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias), 0, SLAB_PANIC, NULL);
2052 trie_leaf_kmem = kmem_cache_create("ip_fib_trie", max(sizeof(struct leaf), sizeof(struct leaf_info)), 0, SLAB_PANIC, NULL);
2059 /* Fix more generic FIB names for init later */
2060 struct fib_table *fib_hash_table(u32 id)
2062 struct fib_table *tb;
2065 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
2071 tb->tb_default = -1;
2072 tb->tb_lookup = fn_trie_lookup;
2073 tb->tb_insert = fn_trie_insert;
2074 tb->tb_delete = fn_trie_delete;
2075 tb->tb_flush = fn_trie_flush;
2076 tb->tb_select_default = fn_trie_select_default;
2077 tb->tb_dump = fn_trie_dump;
2079 t = (struct trie *) tb->tb_data;
2080 memset(t, 0, sizeof(*t));
2082 if (id == RT_TABLE_LOCAL)
2083 pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION);
2088 #ifdef CONFIG_PROC_FS
2089 /* Depth first Trie walk iterator */
2090 struct fib_trie_iter
{
2091 struct seq_net_private p
;
2092 struct fib_table
*tb
;
2093 struct tnode
*tnode
;
2098 static struct node
*fib_trie_get_next(struct fib_trie_iter
*iter
)
2100 struct tnode
*tn
= iter
->tnode
;
2101 unsigned cindex
= iter
->index
;
2104 /* A single entry routing table */
2108 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2109 iter
->tnode
, iter
->index
, iter
->depth
);
2111 while (cindex
< (1<<tn
->bits
)) {
2112 struct node
*n
= tnode_get_child_rcu(tn
, cindex
);
2117 iter
->index
= cindex
+ 1;
2119 /* push down one level */
2120 iter
->tnode
= (struct tnode
*) n
;
2130 /* Current node exhausted, pop back up */
2131 p
= node_parent_rcu((struct node
*)tn
);
2133 cindex
= tkey_extract_bits(tn
->key
, p
->pos
, p
->bits
)+1;
2143 static struct node
*fib_trie_get_first(struct fib_trie_iter
*iter
,
2151 n
= rcu_dereference(t
->trie
);
2156 iter
->tnode
= (struct tnode
*) n
;
2168 static void trie_collect_stats(struct trie
*t
, struct trie_stat
*s
)
2171 struct fib_trie_iter iter
;
2173 memset(s
, 0, sizeof(*s
));
2176 for (n
= fib_trie_get_first(&iter
, t
); n
; n
= fib_trie_get_next(&iter
)) {
2178 struct leaf
*l
= (struct leaf
*)n
;
2179 struct leaf_info
*li
;
2180 struct hlist_node
*tmp
;
2183 s
->totdepth
+= iter
.depth
;
2184 if (iter
.depth
> s
->maxdepth
)
2185 s
->maxdepth
= iter
.depth
;
2187 hlist_for_each_entry_rcu(li
, tmp
, &l
->list
, hlist
)
2190 const struct tnode
*tn
= (const struct tnode
*) n
;
2194 if (tn
->bits
< MAX_STAT_DEPTH
)
2195 s
->nodesizes
[tn
->bits
]++;
2197 for (i
= 0; i
< (1<<tn
->bits
); i
++)
2206 * This outputs /proc/net/fib_triestats
2208 static void trie_show_stats(struct seq_file
*seq
, struct trie_stat
*stat
)
2210 unsigned i
, max
, pointers
, bytes
, avdepth
;
2213 avdepth
= stat
->totdepth
*100 / stat
->leaves
;
2217 seq_printf(seq
, "\tAver depth: %u.%02d\n",
2218 avdepth
/ 100, avdepth
% 100);
2219 seq_printf(seq
, "\tMax depth: %u\n", stat
->maxdepth
);
2221 seq_printf(seq
, "\tLeaves: %u\n", stat
->leaves
);
2222 bytes
= sizeof(struct leaf
) * stat
->leaves
;
2224 seq_printf(seq
, "\tPrefixes: %u\n", stat
->prefixes
);
2225 bytes
+= sizeof(struct leaf_info
) * stat
->prefixes
;
2227 seq_printf(seq
, "\tInternal nodes: %u\n\t", stat
->tnodes
);
2228 bytes
+= sizeof(struct tnode
) * stat
->tnodes
;
2230 max
= MAX_STAT_DEPTH
;
2231 while (max
> 0 && stat
->nodesizes
[max
-1] == 0)
2235 for (i
= 1; i
<= max
; i
++)
2236 if (stat
->nodesizes
[i
] != 0) {
2237 seq_printf(seq
, " %u: %u", i
, stat
->nodesizes
[i
]);
2238 pointers
+= (1<<i
) * stat
->nodesizes
[i
];
2240 seq_putc(seq
, '\n');
2241 seq_printf(seq
, "\tPointers: %u\n", pointers
);
2243 bytes
+= sizeof(struct node
*) * pointers
;
2244 seq_printf(seq
, "Null ptrs: %u\n", stat
->nullpointers
);
2245 seq_printf(seq
, "Total size: %u kB\n", (bytes
+ 1023) / 1024);
2248 #ifdef CONFIG_IP_FIB_TRIE_STATS
2249 static void trie_show_usage(struct seq_file
*seq
,
2250 const struct trie_use_stats
*stats
)
2252 seq_printf(seq
, "\nCounters:\n---------\n");
2253 seq_printf(seq
, "gets = %u\n", stats
->gets
);
2254 seq_printf(seq
, "backtracks = %u\n", stats
->backtrack
);
2255 seq_printf(seq
, "semantic match passed = %u\n",
2256 stats
->semantic_match_passed
);
2257 seq_printf(seq
, "semantic match miss = %u\n",
2258 stats
->semantic_match_miss
);
2259 seq_printf(seq
, "null node hit= %u\n", stats
->null_node_hit
);
2260 seq_printf(seq
, "skipped node resize = %u\n\n",
2261 stats
->resize_node_skipped
);
2263 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2265 static void fib_table_print(struct seq_file
*seq
, struct fib_table
*tb
)
2267 if (tb
->tb_id
== RT_TABLE_LOCAL
)
2268 seq_puts(seq
, "Local:\n");
2269 else if (tb
->tb_id
== RT_TABLE_MAIN
)
2270 seq_puts(seq
, "Main:\n");
2272 seq_printf(seq
, "Id %d:\n", tb
->tb_id
);
2276 static int fib_triestat_seq_show(struct seq_file
*seq
, void *v
)
2278 struct net
*net
= (struct net
*)seq
->private;
2282 "Basic info: size of leaf:"
2283 " %Zd bytes, size of tnode: %Zd bytes.\n",
2284 sizeof(struct leaf
), sizeof(struct tnode
));
2286 for (h
= 0; h
< FIB_TABLE_HASHSZ
; h
++) {
2287 struct hlist_head
*head
= &net
->ipv4
.fib_table_hash
[h
];
2288 struct hlist_node
*node
;
2289 struct fib_table
*tb
;
2291 hlist_for_each_entry_rcu(tb
, node
, head
, tb_hlist
) {
2292 struct trie
*t
= (struct trie
*) tb
->tb_data
;
2293 struct trie_stat stat
;
2298 fib_table_print(seq
, tb
);
2300 trie_collect_stats(t
, &stat
);
2301 trie_show_stats(seq
, &stat
);
2302 #ifdef CONFIG_IP_FIB_TRIE_STATS
2303 trie_show_usage(seq
, &t
->stats
);
2311 static int fib_triestat_seq_open(struct inode
*inode
, struct file
*file
)
2313 return single_open_net(inode
, file
, fib_triestat_seq_show
);
2316 static const struct file_operations fib_triestat_fops = {
2317 .owner = THIS_MODULE,
2318 .open = fib_triestat_seq_open,
2320 .llseek = seq_lseek,
2321 .release = single_release_net,
2324 static struct node
*fib_trie_get_idx(struct seq_file
*seq
, loff_t pos
)
2326 struct fib_trie_iter
*iter
= seq
->private;
2327 struct net
*net
= seq_file_net(seq
);
2331 for (h
= 0; h
< FIB_TABLE_HASHSZ
; h
++) {
2332 struct hlist_head
*head
= &net
->ipv4
.fib_table_hash
[h
];
2333 struct hlist_node
*node
;
2334 struct fib_table
*tb
;
2336 hlist_for_each_entry_rcu(tb
, node
, head
, tb_hlist
) {
2339 for (n
= fib_trie_get_first(iter
,
2340 (struct trie
*) tb
->tb_data
);
2341 n
; n
= fib_trie_get_next(iter
))
2352 static void *fib_trie_seq_start(struct seq_file
*seq
, loff_t
*pos
)
2356 return fib_trie_get_idx(seq
, *pos
);
2359 static void *fib_trie_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2361 struct fib_trie_iter
*iter
= seq
->private;
2362 struct net
*net
= seq_file_net(seq
);
2363 struct fib_table
*tb
= iter
->tb
;
2364 struct hlist_node
*tb_node
;
2369 /* next node in same table */
2370 n
= fib_trie_get_next(iter
);
2374 /* walk rest of this hash chain */
2375 h
= tb
->tb_id
& (FIB_TABLE_HASHSZ
- 1);
2376 while ( (tb_node
= rcu_dereference(tb
->tb_hlist
.next
)) ) {
2377 tb
= hlist_entry(tb_node
, struct fib_table
, tb_hlist
);
2378 n
= fib_trie_get_first(iter
, (struct trie
*) tb
->tb_data
);
2383 /* new hash chain */
2384 while (++h
< FIB_TABLE_HASHSZ
) {
2385 struct hlist_head
*head
= &net
->ipv4
.fib_table_hash
[h
];
2386 hlist_for_each_entry_rcu(tb
, tb_node
, head
, tb_hlist
) {
2387 n
= fib_trie_get_first(iter
, (struct trie
*) tb
->tb_data
);
2399 static void fib_trie_seq_stop(struct seq_file
*seq
, void *v
)
2405 static void seq_indent(struct seq_file
*seq
, int n
)
2407 while (n
-- > 0) seq_puts(seq
, " ");
2410 static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
2413 case RT_SCOPE_UNIVERSE: return "universe";
2414 case RT_SCOPE_SITE: return "site";
2415 case RT_SCOPE_LINK: return "link";
2416 case RT_SCOPE_HOST: return "host";
2417 case RT_SCOPE_NOWHERE: return "nowhere";
2419 snprintf(buf, len, "scope=%d", s);
2424 static const char *const rtn_type_names[__RTN_MAX] = {
2425 [RTN_UNSPEC] = "UNSPEC",
2426 [RTN_UNICAST] = "UNICAST",
2427 [RTN_LOCAL] = "LOCAL",
2428 [RTN_BROADCAST] = "BROADCAST",
2429 [RTN_ANYCAST] = "ANYCAST",
2430 [RTN_MULTICAST] = "MULTICAST",
2431 [RTN_BLACKHOLE] = "BLACKHOLE",
2432 [RTN_UNREACHABLE] = "UNREACHABLE",
2433 [RTN_PROHIBIT] = "PROHIBIT",
2434 [RTN_THROW] = "THROW",
2436 [RTN_XRESOLVE] = "XRESOLVE",
2439 static inline const char *rtn_type(char *buf, size_t len, unsigned t)
2441 if (t < __RTN_MAX && rtn_type_names[t])
2442 return rtn_type_names[t];
2443 snprintf(buf, len, "type %u", t);
2447 /* Pretty print the trie */
2448 static int fib_trie_seq_show(struct seq_file
*seq
, void *v
)
2450 const struct fib_trie_iter
*iter
= seq
->private;
2453 if (!node_parent_rcu(n
))
2454 fib_table_print(seq
, iter
->tb
);
2457 struct tnode
*tn
= (struct tnode
*) n
;
2458 __be32 prf
= htonl(mask_pfx(tn
->key
, tn
->pos
));
2460 seq_indent(seq
, iter
->depth
-1);
2461 seq_printf(seq
, " +-- %pI4/%d %d %d %d\n",
2462 &prf
, tn
->pos
, tn
->bits
, tn
->full_children
,
2463 tn
->empty_children
);
2466 struct leaf
*l
= (struct leaf
*) n
;
2467 struct leaf_info
*li
;
2468 struct hlist_node
*node
;
2469 __be32 val
= htonl(l
->key
);
2471 seq_indent(seq
, iter
->depth
);
2472 seq_printf(seq
, " |-- %pI4\n", &val
);
2474 hlist_for_each_entry_rcu(li
, node
, &l
->list
, hlist
) {
2475 struct fib_alias
*fa
;
2477 list_for_each_entry_rcu(fa
, &li
->falh
, fa_list
) {
2478 char buf1
[32], buf2
[32];
2480 seq_indent(seq
, iter
->depth
+1);
2481 seq_printf(seq
, " /%d %s %s", li
->plen
,
2482 rtn_scope(buf1
, sizeof(buf1
),
2484 rtn_type(buf2
, sizeof(buf2
),
2487 seq_printf(seq
, " tos=%d", fa
->fa_tos
);
2488 seq_putc(seq
, '\n');
2496 static const struct seq_operations fib_trie_seq_ops = {
2497 .start = fib_trie_seq_start,
2498 .next = fib_trie_seq_next,
2499 .stop = fib_trie_seq_stop,
2500 .show = fib_trie_seq_show,
2503 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2505 return seq_open_net(inode, file, &fib_trie_seq_ops, sizeof(struct fib_trie_iter));
2509 static const struct file_operations fib_trie_fops = {
2510 .owner = THIS_MODULE,
2511 .open = fib_trie_seq_open,
2513 .llseek = seq_lseek,
2514 .release = seq_release_net,
2517 struct fib_route_iter
{
2518 struct seq_net_private p
;
2519 struct trie
*main_trie
;
2524 static struct leaf
*fib_route_get_idx(struct fib_route_iter
*iter
, loff_t pos
)
2526 struct leaf
*l
= NULL
;
2527 struct trie
*t
= iter
->main_trie
;
2529 /* use cache location of last found key */
2530 if (iter
->pos
> 0 && pos
>= iter
->pos
&& (l
= fib_find_node(t
, iter
->key
)))
2534 l
= trie_firstleaf(t
);
2537 while (l
&& pos
-- > 0) {
2539 l
= trie_nextleaf(l
);
2543 iter
->key
= pos
; /* remember it */
2545 iter
->pos
= 0; /* forget it */
2550 static void *fib_route_seq_start(struct seq_file
*seq
, loff_t
*pos
)
2553 struct fib_route_iter
*iter
= seq
->private;
2554 struct fib_table
*tb
;
2557 tb
= fib_get_table(seq_file_net(seq
), RT_TABLE_MAIN
);
2561 iter
->main_trie
= (struct trie
*) tb
->tb_data
;
2563 return SEQ_START_TOKEN
;
2565 return fib_route_get_idx(iter
, *pos
- 1);
2568 static void *fib_route_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2570 struct fib_route_iter
*iter
= seq
->private;
2574 if (v
== SEQ_START_TOKEN
) {
2576 l
= trie_firstleaf(iter
->main_trie
);
2579 l
= trie_nextleaf(l
);
2589 static void fib_route_seq_stop(struct seq_file
*seq
, void *v
)
2595 static unsigned fib_flag_trans(int type
, __be32 mask
, const struct fib_info
*fi
)
2597 static unsigned type2flags
[RTN_MAX
+ 1] = {
2598 [7] = RTF_REJECT
, [8] = RTF_REJECT
,
2600 unsigned flags
= type2flags
[type
];
2602 if (fi
&& fi
->fib_nh
->nh_gw
)
2603 flags
|= RTF_GATEWAY
;
2604 if (mask
== htonl(0xFFFFFFFF))
2611 * This outputs /proc/net/route.
2612 * The format of the file is not supposed to be changed
2613 * and needs to be same as fib_hash output to avoid breaking
2616 static int fib_route_seq_show(struct seq_file
*seq
, void *v
)
2619 struct leaf_info
*li
;
2620 struct hlist_node
*node
;
2622 if (v
== SEQ_START_TOKEN
) {
2623 seq_printf(seq
, "%-127s\n", "Iface\tDestination\tGateway "
2624 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2629 hlist_for_each_entry_rcu(li
, node
, &l
->list
, hlist
) {
2630 struct fib_alias
*fa
;
2631 __be32 mask
, prefix
;
2633 mask
= inet_make_mask(li
->plen
);
2634 prefix
= htonl(l
->key
);
2636 list_for_each_entry_rcu(fa
, &li
->falh
, fa_list
) {
2637 const struct fib_info
*fi
= fa
->fa_info
;
2638 unsigned flags
= fib_flag_trans(fa
->fa_type
, mask
, fi
);
2641 if (fa
->fa_type
== RTN_BROADCAST
2642 || fa
->fa_type
== RTN_MULTICAST
)
2647 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
2648 "%d\t%08X\t%d\t%u\t%u%n",
2649 fi
->fib_dev
? fi
->fib_dev
->name
: "*",
2651 fi
->fib_nh
->nh_gw
, flags
, 0, 0,
2655 fi
->fib_advmss
+ 40 : 0),
2657 fi
->fib_rtt
>> 3, &len
);
2660 "*\t%08X\t%08X\t%04X\t%d\t%u\t"
2661 "%d\t%08X\t%d\t%u\t%u%n",
2662 prefix
, 0, flags
, 0, 0, 0,
2663 mask
, 0, 0, 0, &len
);
2665 seq_printf(seq
, "%*s\n", 127 - len
, "");
2672 static const struct seq_operations fib_route_seq_ops = {
2673 .start = fib_route_seq_start,
2674 .next = fib_route_seq_next,
2675 .stop = fib_route_seq_stop,
2676 .show = fib_route_seq_show,
2679 static int fib_route_seq_open(struct inode *inode, struct file *file)
2681 return seq_open_net(inode, file, &fib_route_seq_ops, sizeof(struct fib_route_iter));
2685 static const struct file_operations fib_route_fops = {
2686 .owner = THIS_MODULE,
2687 .open = fib_route_seq_open,
2689 .llseek = seq_lseek,
2690 .release = seq_release_net,
2693 int __net_init fib_proc_init(struct net *net)
2695 if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
2698 if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO, &fib_triestat_fops))
2702 if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
2708 proc_net_remove(net, "fib_triestat");
2710 proc_net_remove(net, "fib_trie");
2715 void __net_exit fib_proc_exit(struct net *net)
2717 proc_net_remove(net, "fib_trie");
2718 proc_net_remove(net, "fib_triestat");
2719 proc_net_remove(net, "route");
2722 #endif /* CONFIG_PROC_FS */