ACPI: thinkpad-acpi: add a safety net for TPEC fan control mode
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / ipv6 / ip6_fib.c
blob0c4aaac851cb1e37c0a27f18337fa4cc84655d25
1 /*
2 * Linux INET6 implementation
3 * Forwarding Information Database
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
17 * Changes:
18 * Yuji SEKIYA @USAGI: Support default route on router node;
19 * remove ip6_null_entry from the top of
20 * routing table.
21 * Ville Nuorvala: Fixed routing subtrees.
23 #include <linux/errno.h>
24 #include <linux/types.h>
25 #include <linux/net.h>
26 #include <linux/route.h>
27 #include <linux/netdevice.h>
28 #include <linux/in6.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
32 #ifdef CONFIG_PROC_FS
33 #include <linux/proc_fs.h>
34 #endif
36 #include <net/ipv6.h>
37 #include <net/ndisc.h>
38 #include <net/addrconf.h>
40 #include <net/ip6_fib.h>
41 #include <net/ip6_route.h>
/* Debug verbosity for this file; RT6_TRACE compiles to nothing below level 3. */
#define RT6_DEBUG 2

#if RT6_DEBUG >= 3
#define RT6_TRACE(x...) printk(KERN_DEBUG x)
#else
#define RT6_TRACE(x...) do { ; } while (0)
#endif

/* Global FIB statistics (entry/node counts, discarded routes). */
struct rt6_statistics	rt6_stats;

/* Slab cache for struct fib6_node allocations; created in fib6_init(). */
static struct kmem_cache * fib6_node_kmem __read_mostly;
/*
 * Walker state machine: which phase of the current node is visited
 * next (S = subtree, L = left child, R = right child, C = this node's
 * routes, U = back up to the parent).
 */
enum fib_walk_state_t
{
#ifdef CONFIG_IPV6_SUBTREES
	FWS_S,
#endif
	FWS_L,
	FWS_R,
	FWS_C,
	FWS_U
};
/* Walker specialization used by fib6_clean_tree(): func(rt, arg) is
 * called on every route; a negative return requests deletion. */
struct fib6_cleaner_t
{
	struct fib6_walker_t w;
	int (*func)(struct rt6_info *, void *arg);
	void *arg;
};
/* Protects the global list of in-progress tree walkers. */
static DEFINE_RWLOCK(fib6_walker_lock);

/* Initial walker state: visit the subtree first when subtrees are
 * compiled in, otherwise start with the left child. */
#ifdef CONFIG_IPV6_SUBTREES
#define FWS_INIT FWS_S
#else
#define FWS_INIT FWS_L
#endif
/* Forward declarations for helpers defined later in this file. */
static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt);
static struct rt6_info * fib6_find_prefix(struct fib6_node *fn);
static struct fib6_node * fib6_repair_tree(struct fib6_node *fn);
static int fib6_walk(struct fib6_walker_t *w);
static int fib6_walk_continue(struct fib6_walker_t *w);
/*
 *	A routing update causes an increase of the serial number on the
 *	affected subtree. This allows for cached routes to be asynchronously
 *	tested when modifications are made to the destination cache as a
 *	result of redirects, path MTU changes, etc.
 */

static __u32 rt_sernum;

/* Garbage-collection timer; armed on demand by fib6_start_gc(). */
static DEFINE_TIMER(ip6_fib_timer, fib6_run_gc, 0, 0);

/* Circular doubly linked list of active walkers; the head links to
 * itself when the list is empty.  Protected by fib6_walker_lock. */
static struct fib6_walker_t fib6_walker_list = {
	.prev	= &fib6_walker_list,
	.next	= &fib6_walker_list,
};

#define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)
105 static inline void fib6_walker_link(struct fib6_walker_t *w)
107 write_lock_bh(&fib6_walker_lock);
108 w->next = fib6_walker_list.next;
109 w->prev = &fib6_walker_list;
110 w->next->prev = w;
111 w->prev->next = w;
112 write_unlock_bh(&fib6_walker_lock);
115 static inline void fib6_walker_unlink(struct fib6_walker_t *w)
117 write_lock_bh(&fib6_walker_lock);
118 w->next->prev = w->prev;
119 w->prev->next = w->next;
120 w->prev = w->next = w;
121 write_unlock_bh(&fib6_walker_lock);
123 static __inline__ u32 fib6_new_sernum(void)
125 u32 n = ++rt_sernum;
126 if ((__s32)n <= 0)
127 rt_sernum = n = 1;
128 return n;
/*
 *	Auxiliary address test functions for the radix tree.
 *
 *	These assume a 32bit processor (although it will work on
 *	64bit processors)
 */

/*
 *	test bit: returns a non-zero __be32 when bit fn_bit of the IPv6
 *	address at token is set, zero otherwise.
 */

static __inline__ __be32 addr_bit_set(void *token, int fn_bit)
{
	__be32 *addr = token;

	/* Word fn_bit>>5 holds the bit; (~fn_bit)&0x1F selects it counting
	 * from the most significant bit of the big-endian word. */
	return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5];
}
149 static __inline__ struct fib6_node * node_alloc(void)
151 struct fib6_node *fn;
153 if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL)
154 memset(fn, 0, sizeof(struct fib6_node));
156 return fn;
/* Return a tree node to the slab cache. */
static __inline__ void node_free(struct fib6_node * fn)
{
	kmem_cache_free(fib6_node_kmem, fn);
}
/* Drop one reference on rt; free the underlying dst when it was the last. */
static __inline__ void rt6_release(struct rt6_info *rt)
{
	if (atomic_dec_and_test(&rt->rt6i_ref))
		dst_free(&rt->u.dst);
}
/* Statically allocated main routing table; its root is a top-level
 * root node whose placeholder leaf is ip6_null_entry. */
static struct fib6_table fib6_main_tbl = {
	.tb6_id		= RT6_TABLE_MAIN,
	.tb6_root	= {
		.leaf		= &ip6_null_entry,
		.fn_flags	= RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO,
	},
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
#define FIB_TABLE_HASHSZ 256
#else
#define FIB_TABLE_HASHSZ 1
#endif
/* Hash of all routing tables, keyed by table id. */
static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ];
/* Insert a table into the global hash, initializing its lock first. */
static void fib6_link_table(struct fib6_table *tb)
{
	unsigned int h;

	/*
	 * Initialize table lock at a single place to give lockdep a key,
	 * tables aren't visible prior to being linked to the list.
	 */
	rwlock_init(&tb->tb6_lock);

	h = tb->tb6_id & (FIB_TABLE_HASHSZ - 1);

	/*
	 * No protection necessary, this is the only list mutation
	 * operation, tables never disappear once they exist.
	 */
	hlist_add_head_rcu(&tb->tb6_hlist, &fib_table_hash[h]);
}
#ifdef CONFIG_IPV6_MULTIPLE_TABLES

/* Statically allocated local table, present only when policy routing
 * with multiple tables is enabled. */
static struct fib6_table fib6_local_tbl = {
	.tb6_id		= RT6_TABLE_LOCAL,
	.tb6_root	= {
		.leaf		= &ip6_null_entry,
		.fn_flags	= RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO,
	},
};
213 static struct fib6_table *fib6_alloc_table(u32 id)
215 struct fib6_table *table;
217 table = kzalloc(sizeof(*table), GFP_ATOMIC);
218 if (table != NULL) {
219 table->tb6_id = id;
220 table->tb6_root.leaf = &ip6_null_entry;
221 table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
224 return table;
227 struct fib6_table *fib6_new_table(u32 id)
229 struct fib6_table *tb;
231 if (id == 0)
232 id = RT6_TABLE_MAIN;
233 tb = fib6_get_table(id);
234 if (tb)
235 return tb;
237 tb = fib6_alloc_table(id);
238 if (tb != NULL)
239 fib6_link_table(tb);
241 return tb;
/* RCU lookup of a table by id; returns NULL when it does not exist.
 * id 0 is an alias for the main table. */
struct fib6_table *fib6_get_table(u32 id)
{
	struct fib6_table *tb;
	struct hlist_node *node;
	unsigned int h;

	if (id == 0)
		id = RT6_TABLE_MAIN;
	h = id & (FIB_TABLE_HASHSZ - 1);
	rcu_read_lock();
	hlist_for_each_entry_rcu(tb, node, &fib_table_hash[h], tb6_hlist) {
		if (tb->tb6_id == id) {
			rcu_read_unlock();
			return tb;
		}
	}
	rcu_read_unlock();

	return NULL;
}
/* Boot-time registration of the built-in tables (multi-table build). */
static void __init fib6_tables_init(void)
{
	fib6_link_table(&fib6_main_tbl);
	fib6_link_table(&fib6_local_tbl);
}
#else

/* Single-table build: every table id maps onto the main table. */
struct fib6_table *fib6_new_table(u32 id)
{
	return fib6_get_table(id);
}

struct fib6_table *fib6_get_table(u32 id)
{
	return &fib6_main_tbl;
}

/* No policy rules to evaluate with one table: go straight to lookup. */
struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags,
				   pol_lookup_t lookup)
{
	return (struct dst_entry *) lookup(&fib6_main_tbl, fl, flags);
}

static void __init fib6_tables_init(void)
{
	fib6_link_table(&fib6_main_tbl);
}

#endif
/* Walker callback for dumps: emit every route on the current leaf.
 * Returns 1 (suspend the walk) when the netlink frame fills up,
 * 0 when the whole leaf has been dumped. */
static int fib6_dump_node(struct fib6_walker_t *w)
{
	int res;
	struct rt6_info *rt;

	for (rt = w->leaf; rt; rt = rt->u.next) {
		res = rt6_dump_route(rt, w->args);
		if (res < 0) {
			/* Frame is full, suspend walking */
			w->leaf = rt;	/* resume from this route next time */
			return 1;
		}
		BUG_TRAP(res!=0);
	}
	w->leaf = NULL;
	return 0;
}
/* Tear down dump state: free the walker stashed in cb->args[2] and
 * restore the caller's original done callback from cb->args[3]. */
static void fib6_dump_end(struct netlink_callback *cb)
{
	struct fib6_walker_t *w = (void*)cb->args[2];

	if (w) {
		cb->args[2] = 0;
		kfree(w);
	}
	cb->done = (void*)cb->args[3];
	/* NOTE(review): args[1] is the entry-resume counter in
	 * inet6_dump_fib; setting it to 3 here appears to mark the dump
	 * finished — confirm against the dump resume logic. */
	cb->args[1] = 3;
}
326 static int fib6_dump_done(struct netlink_callback *cb)
328 fib6_dump_end(cb);
329 return cb->done ? cb->done(cb) : 0;
/* Dump one table.  cb->args[4] remembers whether a previous walk of
 * this table was suspended (skb full) and must be continued.  Returns
 * the walker result: 0 complete, >0 suspended, <0 error. */
static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	struct fib6_walker_t *w;
	int res;

	w = (void *)cb->args[2];
	w->root = &table->tb6_root;

	if (cb->args[4] == 0) {
		/* First pass over this table: start a fresh walk. */
		read_lock_bh(&table->tb6_lock);
		res = fib6_walk(w);
		read_unlock_bh(&table->tb6_lock);
		if (res > 0)
			cb->args[4] = 1;
	} else {
		/* Resume a previously suspended walk. */
		read_lock_bh(&table->tb6_lock);
		res = fib6_walk_continue(w);
		read_unlock_bh(&table->tb6_lock);
		if (res != 0) {
			if (res < 0)
				fib6_walker_unlink(w);
			goto end;
		}
		fib6_walker_unlink(w);
		cb->args[4] = 0;
	}
end:
	return res;
}
/* Netlink RTM_GETROUTE dump handler.  Iterates every table in the
 * hash.  cb->args[] carry resume state between invocations:
 *   [0] hash bucket, [1] entry index within bucket, [2] walker ptr,
 *   [3] saved done callback, [4] per-table walk-suspended flag. */
int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct rt6_rtnl_dump_arg arg;
	struct fib6_walker_t *w;
	struct fib6_table *tb;
	struct hlist_node *node;
	int res = 0;

	s_h = cb->args[0];
	s_e = cb->args[1];

	w = (void *)cb->args[2];
	if (w == NULL) {
		/* New dump:
		 *
		 * 1. hook callback destructor.
		 */
		cb->args[3] = (long)cb->done;
		cb->done = fib6_dump_done;

		/*
		 * 2. allocate and initialize walker.
		 */
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (w == NULL)
			return -ENOMEM;
		w->func = fib6_dump_node;
		cb->args[2] = (long)w;
	}

	arg.skb = skb;
	arg.cb = cb;
	w->args = &arg;

	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
		e = 0;
		hlist_for_each_entry(tb, node, &fib_table_hash[h], tb6_hlist) {
			if (e < s_e)
				goto next;	/* skip entries dumped earlier */
			res = fib6_dump_table(tb, skb, cb);
			if (res != 0)
				goto out;
next:
			e++;
		}
	}
out:
	cb->args[1] = e;
	cb->args[0] = h;

	res = res < 0 ? res : skb->len;
	if (res <= 0)
		fib6_dump_end(cb);
	return res;
}
/*
 *	Routing Table
 *
 *	return the appropriate node for a routing tree "add" operation
 *	by either creating and inserting or by returning an existing
 *	node.
 *
 *	addr/plen is the prefix being inserted; offset selects which
 *	rt6key (rt6i_dst or rt6i_src) inside rt6_info the tree is keyed
 *	on.  Returns NULL on allocation failure.
 */

static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
				     int addrlen, int plen,
				     int offset)
{
	struct fib6_node *fn, *in, *ln;
	struct fib6_node *pn = NULL;
	struct rt6key *key;
	int	bit;
	__be32	dir = 0;
	__u32	sernum = fib6_new_sernum();

	RT6_TRACE("fib6_add_1\n");

	/* insert node in tree */

	fn = root;

	do {
		key = (struct rt6key *)((u8 *)fn->leaf + offset);

		/*
		 *	Prefix match
		 */
		if (plen < fn->fn_bit ||
		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
			goto insert_above;

		/*
		 *	Exact match ?
		 */

		if (plen == fn->fn_bit) {
			/* clean up an intermediate node */
			if ((fn->fn_flags & RTN_RTINFO) == 0) {
				rt6_release(fn->leaf);
				fn->leaf = NULL;
			}

			fn->fn_sernum = sernum;

			return fn;
		}

		/*
		 *	We have more bits to go
		 */

		/* Try to walk down on tree. */
		fn->fn_sernum = sernum;
		dir = addr_bit_set(addr, fn->fn_bit);
		pn = fn;
		fn = dir ? fn->right: fn->left;
	} while (fn);

	/*
	 *	We walked to the bottom of tree.
	 *	Create new leaf node without children.
	 */

	ln = node_alloc();

	if (ln == NULL)
		return NULL;
	ln->fn_bit = plen;

	ln->parent = pn;
	ln->fn_sernum = sernum;

	if (dir)
		pn->right = ln;
	else
		pn->left  = ln;

	return ln;


insert_above:
	/*
	 * split since we don't have a common prefix anymore or
	 * we have a less significant route.
	 * we've to insert an intermediate node on the list
	 * this new node will point to the one we need to create
	 * and the current
	 */

	pn = fn->parent;

	/* find 1st bit in difference between the 2 addrs.

	   See comment in __ipv6_addr_diff: bit may be an invalid value,
	   but if it is >= plen, the value is ignored in any case.
	 */

	bit = __ipv6_addr_diff(addr, &key->addr, addrlen);

	/*
	 *		(intermediate)[in]
	 *	          /	   \
	 *	(new leaf node)[ln] (old node)[fn]
	 */
	if (plen > bit) {
		in = node_alloc();
		ln = node_alloc();

		if (in == NULL || ln == NULL) {
			if (in)
				node_free(in);
			if (ln)
				node_free(ln);
			return NULL;
		}

		/*
		 * new intermediate node.
		 * RTN_RTINFO will
		 * be off since that an address that chooses one of
		 * the branches would not match less specific routes
		 * in the other branch
		 */

		in->fn_bit = bit;

		in->parent = pn;
		in->leaf = fn->leaf;
		atomic_inc(&in->leaf->rt6i_ref);

		in->fn_sernum = sernum;

		/* update parent pointer */
		if (dir)
			pn->right = in;
		else
			pn->left  = in;

		ln->fn_bit = plen;

		ln->parent = in;
		fn->parent = in;

		ln->fn_sernum = sernum;

		if (addr_bit_set(addr, bit)) {
			in->right = ln;
			in->left  = fn;
		} else {
			in->left  = ln;
			in->right = fn;
		}
	} else { /* plen <= bit */

		/*
		 *		(new leaf node)[ln]
		 *	          /	   \
		 *	     (old node)[fn] NULL
		 */

		ln = node_alloc();

		if (ln == NULL)
			return NULL;

		ln->fn_bit = plen;

		ln->parent = pn;

		ln->fn_sernum = sernum;

		if (dir)
			pn->right = ln;
		else
			pn->left  = ln;

		if (addr_bit_set(&key->addr, plen))
			ln->right = fn;
		else
			ln->left  = fn;

		fn->parent = ln;
	}
	return ln;
}
/*
 *	Insert routing information in a node.
 *
 *	Routes on a leaf are kept sorted by metric.  A route with the
 *	same metric, device, idev and gateway as an existing entry is a
 *	duplicate (-EEXIST); as a side effect a permanent duplicate
 *	refreshes or clears the expiry of an RTF_EXPIRES entry.
 */

static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
			    struct nl_info *info)
{
	struct rt6_info *iter = NULL;
	struct rt6_info **ins;

	ins = &fn->leaf;

	for (iter = fn->leaf; iter; iter=iter->u.next) {
		/*
		 *	Search for duplicates
		 */

		if (iter->rt6i_metric == rt->rt6i_metric) {
			/*
			 *	Same priority level
			 */

			if (iter->rt6i_dev == rt->rt6i_dev &&
			    iter->rt6i_idev == rt->rt6i_idev &&
			    ipv6_addr_equal(&iter->rt6i_gateway,
					    &rt->rt6i_gateway)) {
				if (!(iter->rt6i_flags&RTF_EXPIRES))
					return -EEXIST;
				iter->rt6i_expires = rt->rt6i_expires;
				if (!(rt->rt6i_flags&RTF_EXPIRES)) {
					iter->rt6i_flags &= ~RTF_EXPIRES;
					iter->rt6i_expires = 0;
				}
				return -EEXIST;
			}
		}

		/* Keep the list ordered by ascending metric. */
		if (iter->rt6i_metric > rt->rt6i_metric)
			break;

		ins = &iter->u.next;
	}

	/* Reset round-robin state, if necessary */
	if (ins == &fn->leaf)
		fn->rr_ptr = NULL;

	/*
	 *	insert node
	 */

	/* NOTE(review): this label appears to have no matching goto in
	 * this view of the file — confirm before removing. */
out:
	rt->u.next = iter;
	*ins = rt;
	rt->rt6i_node = fn;
	atomic_inc(&rt->rt6i_ref);
	inet6_rt_notify(RTM_NEWROUTE, rt, info);
	rt6_stats.fib_rt_entries++;

	if ((fn->fn_flags & RTN_RTINFO) == 0) {
		rt6_stats.fib_route_nodes++;
		fn->fn_flags |= RTN_RTINFO;
	}

	return 0;
}
/* Arm the GC timer for an expiring/cached route, but only when the
 * timer is idle (expires == 0 is the idle marker, see fib6_run_gc). */
static __inline__ void fib6_start_gc(struct rt6_info *rt)
{
	if (ip6_fib_timer.expires == 0 &&
	    (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
		mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval);
}
/* Arm the GC timer unconditionally if it is currently idle. */
void fib6_force_start_gc(void)
{
	if (ip6_fib_timer.expires == 0)
		mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval);
}
/*
 *	Add routing information to the routing tree.
 *	<destination addr>/<source addr>
 *	with source addr info in sub-trees
 *
 *	Returns 0 on success; -EEXIST for duplicates, -ENOMEM when node
 *	allocation fails.  On error the route's dst is freed here.
 */

int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
{
	struct fib6_node *fn, *pn = NULL;
	int err = -ENOMEM;

	fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
			rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst));

	if (fn == NULL)
		goto out;

	pn = fn;

#ifdef CONFIG_IPV6_SUBTREES
	if (rt->rt6i_src.plen) {
		struct fib6_node *sn;

		if (fn->subtree == NULL) {
			struct fib6_node *sfn;

			/*
			 * Create subtree.
			 *
			 *		fn[main tree]
			 *		|
			 *		sfn[subtree root]
			 *		   \
			 *		    sn[new leaf node]
			 */

			/* Create subtree root node */
			sfn = node_alloc();
			if (sfn == NULL)
				goto st_failure;

			sfn->leaf = &ip6_null_entry;
			atomic_inc(&ip6_null_entry.rt6i_ref);
			sfn->fn_flags = RTN_ROOT;
			sfn->fn_sernum = fib6_new_sernum();

			/* Now add the first leaf node to new subtree */

			sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
					sizeof(struct in6_addr), rt->rt6i_src.plen,
					offsetof(struct rt6_info, rt6i_src));

			if (sn == NULL) {
				/* If it is failed, discard just allocated
				   root, and then (in st_failure) stale node
				   in main tree.
				 */
				node_free(sfn);
				goto st_failure;
			}

			/* Now link new subtree to main tree */
			sfn->parent = fn;
			fn->subtree = sfn;
		} else {
			sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
					sizeof(struct in6_addr), rt->rt6i_src.plen,
					offsetof(struct rt6_info, rt6i_src));

			if (sn == NULL)
				goto st_failure;
		}

		/* The main-tree node needs a leaf; use this route. */
		if (fn->leaf == NULL) {
			fn->leaf = rt;
			atomic_inc(&rt->rt6i_ref);
		}
		fn = sn;
	}
#endif

	err = fib6_add_rt2node(fn, rt, info);

	if (err == 0) {
		fib6_start_gc(rt);
		if (!(rt->rt6i_flags&RTF_CACHE))
			fib6_prune_clones(pn, rt);
	}

out:
	if (err) {
#ifdef CONFIG_IPV6_SUBTREES
		/*
		 * If fib6_add_1 has cleared the old leaf pointer in the
		 * super-tree leaf node we have to find a new one for it.
		 */
		if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) {
			pn->leaf = fib6_find_prefix(pn);
#if RT6_DEBUG >= 2
			if (!pn->leaf) {
				BUG_TRAP(pn->leaf != NULL);
				pn->leaf = &ip6_null_entry;
			}
#endif
			atomic_inc(&pn->leaf->rt6i_ref);
		}
#endif
		dst_free(&rt->u.dst);
	}
	return err;

#ifdef CONFIG_IPV6_SUBTREES
	/* Subtree creation failed, probably main tree node
	   is orphan. If it is, shoot it.
	 */
st_failure:
	if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
		fib6_repair_tree(fn);
	dst_free(&rt->u.dst);
	return err;
#endif
}
/*
 *	Routing tree lookup
 *
 */

struct lookup_args {
	int		offset;		/* key offset on rt6_info; 0 is the
					 * array terminator (see fib6_lookup) */
	struct in6_addr	*addr;		/* search key			*/
};
/* Longest-prefix lookup within one tree.  Descends to the deepest node
 * reachable for args->addr, then backtracks towards the root until a
 * node with route info (or a matching subtree result) is found.
 * args is an array: args + 1 is used for the source-address subtree. */
static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
					struct lookup_args *args)
{
	struct fib6_node *fn;
	__be32 dir;

	if (unlikely(args->offset == 0))
		return NULL;	/* reached the sentinel entry */

	/*
	 *	Descend on a tree
	 */

	fn = root;

	for (;;) {
		struct fib6_node *next;

		dir = addr_bit_set(args->addr, fn->fn_bit);

		next = dir ? fn->right : fn->left;

		if (next) {
			fn = next;
			continue;
		}

		break;
	}

	/* Backtrack, testing each candidate prefix on the way up. */
	while(fn) {
		if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
			struct rt6key *key;

			key = (struct rt6key *) ((u8 *) fn->leaf +
						 args->offset);

			if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
				if (fn->subtree)
					fn = fib6_lookup_1(fn->subtree, args + 1);
#endif
				if (!fn || fn->fn_flags & RTN_RTINFO)
					return fn;
			}
		}

		if (fn->fn_flags & RTN_ROOT)
			break;

		fn = fn->parent;
	}

	return NULL;
}
/* Public lookup entry point: find the node for daddr (and, with
 * subtrees enabled, saddr).  Falls back to the tree root when nothing
 * matches, so callers always receive a usable node. */
struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr,
			       struct in6_addr *saddr)
{
	struct fib6_node *fn;
	struct lookup_args args[] = {
		{
			.offset = offsetof(struct rt6_info, rt6i_dst),
			.addr = daddr,
		},
#ifdef CONFIG_IPV6_SUBTREES
		{
			.offset = offsetof(struct rt6_info, rt6i_src),
			.addr = saddr,
		},
#endif
		{
			.offset = 0,	/* sentinel */
		}
	};

	/* With no daddr, start directly at the source-address entry. */
	fn = fib6_lookup_1(root, daddr ? args : args + 1);

	if (fn == NULL || fn->fn_flags & RTN_TL_ROOT)
		fn = root;

	return fn;
}
909 * Get node with specified destination prefix (and source prefix,
910 * if subtrees are used)
914 static struct fib6_node * fib6_locate_1(struct fib6_node *root,
915 struct in6_addr *addr,
916 int plen, int offset)
918 struct fib6_node *fn;
920 for (fn = root; fn ; ) {
921 struct rt6key *key = (struct rt6key *)((u8 *)fn->leaf + offset);
924 * Prefix match
926 if (plen < fn->fn_bit ||
927 !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
928 return NULL;
930 if (plen == fn->fn_bit)
931 return fn;
934 * We have more bits to go
936 if (addr_bit_set(addr, fn->fn_bit))
937 fn = fn->right;
938 else
939 fn = fn->left;
941 return NULL;
/* Locate the node for exactly daddr/dst_len (and saddr/src_len in the
 * subtree).  Returns it only when it actually carries route info. */
struct fib6_node * fib6_locate(struct fib6_node *root,
			       struct in6_addr *daddr, int dst_len,
			       struct in6_addr *saddr, int src_len)
{
	struct fib6_node *fn;

	fn = fib6_locate_1(root, daddr, dst_len,
			   offsetof(struct rt6_info, rt6i_dst));

#ifdef CONFIG_IPV6_SUBTREES
	if (src_len) {
		BUG_TRAP(saddr!=NULL);
		if (fn && fn->subtree)
			fn = fib6_locate_1(fn->subtree, saddr, src_len,
					   offsetof(struct rt6_info, rt6i_src));
	}
#endif

	if (fn && fn->fn_flags&RTN_RTINFO)
		return fn;

	return NULL;
}
970 * Deletion
974 static struct rt6_info * fib6_find_prefix(struct fib6_node *fn)
976 if (fn->fn_flags&RTN_ROOT)
977 return &ip6_null_entry;
979 while(fn) {
980 if(fn->left)
981 return fn->left->leaf;
983 if(fn->right)
984 return fn->right->leaf;
986 fn = FIB6_SUBTREE(fn);
988 return NULL;
/*
 *	Called to trim the tree of intermediate nodes when possible. "fn"
 *	is the node we want to try and remove.
 *
 *	Loops upward removing empty split nodes, adjusting any active
 *	walkers that reference a removed node.  Returns the parent at
 *	which repair stopped.  Note the #ifdef'd braces: the else arm of
 *	the subtree check wraps the unconditional code below it.
 */

static struct fib6_node * fib6_repair_tree(struct fib6_node *fn)
{
	int children;
	int nstate;
	struct fib6_node *child, *pn;
	struct fib6_walker_t *w;
	int iter = 0;

	for (;;) {
		RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
		iter++;

		BUG_TRAP(!(fn->fn_flags&RTN_RTINFO));
		BUG_TRAP(!(fn->fn_flags&RTN_TL_ROOT));
		BUG_TRAP(fn->leaf==NULL);

		/* children: bit 0 = right child present, bit 1 = left. */
		children = 0;
		child = NULL;
		if (fn->right) child = fn->right, children |= 1;
		if (fn->left) child = fn->left, children |= 2;

		if (children == 3 || FIB6_SUBTREE(fn)
#ifdef CONFIG_IPV6_SUBTREES
		    /* Subtree root (i.e. fn) may have one child */
		    || (children && fn->fn_flags&RTN_ROOT)
#endif
		    ) {
			/* Node must stay; give it a replacement leaf. */
			fn->leaf = fib6_find_prefix(fn);
#if RT6_DEBUG >= 2
			if (fn->leaf==NULL) {
				BUG_TRAP(fn->leaf);
				fn->leaf = &ip6_null_entry;
			}
#endif
			atomic_inc(&fn->leaf->rt6i_ref);
			return fn->parent;
		}

		pn = fn->parent;
#ifdef CONFIG_IPV6_SUBTREES
		if (FIB6_SUBTREE(pn) == fn) {
			BUG_TRAP(fn->fn_flags&RTN_ROOT);
			FIB6_SUBTREE(pn) = NULL;
			nstate = FWS_L;
		} else {
			BUG_TRAP(!(fn->fn_flags&RTN_ROOT));
#endif
			/* Splice fn's single child (or NULL) into pn. */
			if (pn->right == fn) pn->right = child;
			else if (pn->left == fn) pn->left = child;
#if RT6_DEBUG >= 2
			else BUG_TRAP(0);
#endif
			if (child)
				child->parent = pn;
			nstate = FWS_R;
#ifdef CONFIG_IPV6_SUBTREES
		}
#endif

		/* Fix any walker currently parked on the removed node. */
		read_lock(&fib6_walker_lock);
		FOR_WALKERS(w) {
			if (child == NULL) {
				if (w->root == fn) {
					w->root = w->node = NULL;
					RT6_TRACE("W %p adjusted by delroot 1\n", w);
				} else if (w->node == fn) {
					RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate);
					w->node = pn;
					w->state = nstate;
				}
			} else {
				if (w->root == fn) {
					w->root = child;
					RT6_TRACE("W %p adjusted by delroot 2\n", w);
				}
				if (w->node == fn) {
					w->node = child;
					if (children&2) {
						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
						w->state = w->state>=FWS_R ? FWS_U : FWS_INIT;
					} else {
						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
						w->state = w->state>=FWS_C ? FWS_U : FWS_INIT;
					}
				}
			}
		}
		read_unlock(&fib6_walker_lock);

		node_free(fn);
		if (pn->fn_flags&RTN_RTINFO || FIB6_SUBTREE(pn))
			return pn;

		/* Parent became an empty split node too: repair it next. */
		rt6_release(pn->leaf);
		pn->leaf = NULL;
		fn = pn;
	}
}
/* Unlink *rtp from its node's route list, adjust active walkers,
 * prune the node if it became empty, and drop the route's tree
 * reference.  Called with the table write-locked. */
static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
			   struct nl_info *info)
{
	struct fib6_walker_t *w;
	struct rt6_info *rt = *rtp;

	RT6_TRACE("fib6_del_route\n");

	/* Unlink it */
	*rtp = rt->u.next;
	rt->rt6i_node = NULL;
	rt6_stats.fib_rt_entries--;
	rt6_stats.fib_discarded_routes++;

	/* Reset round-robin state, if necessary */
	if (fn->rr_ptr == rt)
		fn->rr_ptr = NULL;

	/* Adjust walkers */
	read_lock(&fib6_walker_lock);
	FOR_WALKERS(w) {
		if (w->state == FWS_C && w->leaf == rt) {
			RT6_TRACE("walker %p adjusted by delroute\n", w);
			w->leaf = rt->u.next;
			if (w->leaf == NULL)
				w->state = FWS_U;
		}
	}
	read_unlock(&fib6_walker_lock);

	rt->u.next = NULL;

	/* The top-level root keeps the null entry as placeholder leaf. */
	if (fn->leaf == NULL && fn->fn_flags&RTN_TL_ROOT)
		fn->leaf = &ip6_null_entry;

	/* If it was last route, expunge its radix tree node */
	if (fn->leaf == NULL) {
		fn->fn_flags &= ~RTN_RTINFO;
		rt6_stats.fib_route_nodes--;
		fn = fib6_repair_tree(fn);
	}

	if (atomic_read(&rt->rt6i_ref) != 1) {
		/* This route is used as dummy address holder in some split
		 * nodes. It is not leaked, but it still holds other resources,
		 * which must be released in time. So, scan ascendant nodes
		 * and replace dummy references to this route with references
		 * to still alive ones.
		 */
		while (fn) {
			if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) {
				fn->leaf = fib6_find_prefix(fn);
				atomic_inc(&fn->leaf->rt6i_ref);
				rt6_release(rt);
			}
			fn = fn->parent;
		}
		/* No more references are possible at this point. */
		if (atomic_read(&rt->rt6i_ref) != 1) BUG();
	}

	inet6_rt_notify(RTM_DELROUTE, rt, info);
	rt6_release(rt);
}
/* Delete route rt from the tree.  Non-cache routes first have their
 * cached clones pruned (clones may live in a different subtree).
 * Returns 0 on success, -ENOENT when rt is not in the tree. */
int fib6_del(struct rt6_info *rt, struct nl_info *info)
{
	struct fib6_node *fn = rt->rt6i_node;
	struct rt6_info **rtp;

#if RT6_DEBUG >= 2
	if (rt->u.dst.obsolete>0) {
		BUG_TRAP(fn==NULL);
		return -ENOENT;
	}
#endif
	if (fn == NULL || rt == &ip6_null_entry)
		return -ENOENT;

	BUG_TRAP(fn->fn_flags&RTN_RTINFO);

	if (!(rt->rt6i_flags&RTF_CACHE)) {
		struct fib6_node *pn = fn;
#ifdef CONFIG_IPV6_SUBTREES
		/* clones of this route might be in another subtree */
		if (rt->rt6i_src.plen) {
			/* Climb out of the subtree to its main-tree parent. */
			while (!(pn->fn_flags&RTN_ROOT))
				pn = pn->parent;
			pn = pn->parent;
		}
#endif
		fib6_prune_clones(pn, rt);
	}

	/*
	 *	Walk the leaf entries looking for ourself
	 */

	for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.next) {
		if (*rtp == rt) {
			fib6_del_route(fn, rtp, info);
			return 0;
		}
	}
	return -ENOENT;
}
/*
 *	Tree traversal function.
 *
 *	Certainly, it is not interrupt safe.
 *	However, it is internally reenterable wrt itself and fib6_add/fib6_del.
 *	It means, that we can modify tree during walking
 *	and use this function for garbage collection, clone pruning,
 *	cleaning tree when a device goes down etc. etc.
 *
 *	It guarantees that every node will be traversed,
 *	and that it will be traversed only once.
 *
 *	Callback function w->func may return:
 *	0 -> continue walking.
 *	positive value -> walking is suspended (used by tree dumps,
 *	and probably by gc, if it will be split to several slices)
 *	negative value -> terminate walking.
 *
 *	The function itself returns:
 *	0   -> walk is complete.
 *	>0  -> walk is incomplete (i.e. suspended)
 *	<0  -> walk is terminated by an error.
 */

static int fib6_walk_continue(struct fib6_walker_t *w)
{
	struct fib6_node *fn, *pn;

	for (;;) {
		fn = w->node;
		if (fn == NULL)
			return 0;

		if (w->prune && fn != w->root &&
		    fn->fn_flags&RTN_RTINFO && w->state < FWS_C) {
			/* Pruned walk: treat this node as a leaf. */
			w->state = FWS_C;
			w->leaf = fn->leaf;
		}
		/* Each case deliberately falls through to the next phase. */
		switch (w->state) {
#ifdef CONFIG_IPV6_SUBTREES
		case FWS_S:
			if (FIB6_SUBTREE(fn)) {
				w->node = FIB6_SUBTREE(fn);
				continue;
			}
			w->state = FWS_L;
			/* fall through */
#endif
		case FWS_L:
			if (fn->left) {
				w->node = fn->left;
				w->state = FWS_INIT;
				continue;
			}
			w->state = FWS_R;
			/* fall through */
		case FWS_R:
			if (fn->right) {
				w->node = fn->right;
				w->state = FWS_INIT;
				continue;
			}
			w->state = FWS_C;
			w->leaf = fn->leaf;
			/* fall through */
		case FWS_C:
			if (w->leaf && fn->fn_flags&RTN_RTINFO) {
				int err = w->func(w);
				if (err)
					return err;
				continue;
			}
			w->state = FWS_U;
			/* fall through */
		case FWS_U:
			if (fn == w->root)
				return 0;
			pn = fn->parent;
			w->node = pn;
#ifdef CONFIG_IPV6_SUBTREES
			if (FIB6_SUBTREE(pn) == fn) {
				BUG_TRAP(fn->fn_flags&RTN_ROOT);
				w->state = FWS_L;
				continue;
			}
#endif
			if (pn->left == fn) {
				w->state = FWS_R;
				continue;
			}
			if (pn->right == fn) {
				w->state = FWS_C;
				w->leaf = w->node->leaf;
				continue;
			}
#if RT6_DEBUG >= 2
			BUG_TRAP(0);
#endif
		}
	}
}
1300 static int fib6_walk(struct fib6_walker_t *w)
1302 int res;
1304 w->state = FWS_INIT;
1305 w->node = w->root;
1307 fib6_walker_link(w);
1308 res = fib6_walk_continue(w);
1309 if (res <= 0)
1310 fib6_walker_unlink(w);
1311 return res;
/* Walker callback for fib6_clean_tree(): run c->func on every route of
 * the current leaf and delete routes for which it returns negative. */
static int fib6_clean_node(struct fib6_walker_t *w)
{
	int res;
	struct rt6_info *rt;
	struct fib6_cleaner_t *c = (struct fib6_cleaner_t*)w;

	for (rt = w->leaf; rt; rt = rt->u.next) {
		res = c->func(rt, c->arg);
		if (res < 0) {
			w->leaf = rt;
			res = fib6_del(rt, NULL);
			if (res) {
#if RT6_DEBUG >= 2
				printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res);
#endif
				continue;
			}
			/* fib6_del adjusted w->leaf via the walker list;
			 * return so the walk re-enters with the new list. */
			return 0;
		}
		BUG_TRAP(res==0);
	}
	w->leaf = rt;
	return 0;
}
1340 * Convenient frontend to tree walker.
1342 * func is called on each route.
1343 * It may return -1 -> delete this route.
1344 * 0 -> continue walking
1346 * prune==1 -> only immediate children of node (certainly,
1347 * ignoring pure split nodes) will be scanned.
1350 static void fib6_clean_tree(struct fib6_node *root,
1351 int (*func)(struct rt6_info *, void *arg),
1352 int prune, void *arg)
1354 struct fib6_cleaner_t c;
1356 c.w.root = root;
1357 c.w.func = fib6_clean_node;
1358 c.w.prune = prune;
1359 c.func = func;
1360 c.arg = arg;
1362 fib6_walk(&c.w);
/* Apply func to every route of every table.  Tables are located under
 * RCU; each tree is cleaned under its own table write lock. */
void fib6_clean_all(int (*func)(struct rt6_info *, void *arg),
		    int prune, void *arg)
{
	struct fib6_table *table;
	struct hlist_node *node;
	unsigned int h;

	rcu_read_lock();
	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		hlist_for_each_entry_rcu(table, node, &fib_table_hash[h],
					 tb6_hlist) {
			write_lock_bh(&table->tb6_lock);
			fib6_clean_tree(&table->tb6_root, func, prune, arg);
			write_unlock_bh(&table->tb6_lock);
		}
	}
	rcu_read_unlock();
}
1384 static int fib6_prune_clone(struct rt6_info *rt, void *arg)
1386 if (rt->rt6i_flags & RTF_CACHE) {
1387 RT6_TRACE("pruning clone %p\n", rt);
1388 return -1;
1391 return 0;
/* Remove all cached clones directly below fn (pruned walk). */
static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt)
{
	fib6_clean_tree(fn, fib6_prune_clone, 1, rt);
}
/*
 *	Garbage collection
 */

/* Input (timeout) and output (more) shared with the fib6_age callback;
 * serialized by fib6_gc_lock in fib6_run_gc(). */
static struct fib6_gc_args
{
	int			timeout;
	int			more;
} gc_args;
1409 static int fib6_age(struct rt6_info *rt, void *arg)
1411 unsigned long now = jiffies;
1414 * check addrconf expiration here.
1415 * Routes are expired even if they are in use.
1417 * Also age clones. Note, that clones are aged out
1418 * only if they are not in use now.
1421 if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) {
1422 if (time_after(now, rt->rt6i_expires)) {
1423 RT6_TRACE("expiring %p\n", rt);
1424 return -1;
1426 gc_args.more++;
1427 } else if (rt->rt6i_flags & RTF_CACHE) {
1428 if (atomic_read(&rt->u.dst.__refcnt) == 0 &&
1429 time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) {
1430 RT6_TRACE("aging clone %p\n", rt);
1431 return -1;
1432 } else if ((rt->rt6i_flags & RTF_GATEWAY) &&
1433 (!(rt->rt6i_nexthop->flags & NTF_ROUTER))) {
1434 RT6_TRACE("purging route %p via non-router but gateway\n",
1435 rt);
1436 return -1;
1438 gc_args.more++;
1441 return 0;
/* Serializes GC runs and protects gc_args. */
static DEFINE_SPINLOCK(fib6_gc_lock);

/* Run FIB garbage collection.
 * dummy == ~0UL means a timer-driven run: if a GC is already in
 * progress, just retry in HZ jiffies.  Any other value is an explicit
 * clone timeout (0 selects the default ip6_rt_gc_interval) and the
 * caller blocks on the lock.  The timer is re-armed while expirable
 * entries remain; an idle timer is flagged by expires == 0 (checked by
 * fib6_start_gc). */
void fib6_run_gc(unsigned long dummy)
{
	if (dummy != ~0UL) {
		spin_lock_bh(&fib6_gc_lock);
		gc_args.timeout = dummy ? (int)dummy : ip6_rt_gc_interval;
	} else {
		local_bh_disable();
		if (!spin_trylock(&fib6_gc_lock)) {
			mod_timer(&ip6_fib_timer, jiffies + HZ);
			local_bh_enable();
			return;
		}
		gc_args.timeout = ip6_rt_gc_interval;
	}
	gc_args.more = 0;

	ndisc_dst_gc(&gc_args.more);
	fib6_clean_all(fib6_age, 0, NULL);

	if (gc_args.more)
		mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval);
	else {
		del_timer(&ip6_fib_timer);
		ip6_fib_timer.expires = 0;
	}
	spin_unlock_bh(&fib6_gc_lock);
}
/* Boot-time init: create the fib6_node slab (SLAB_PANIC means failure
 * panics, so no error handling needed) and register built-in tables. */
void __init fib6_init(void)
{
	fib6_node_kmem = kmem_cache_create("fib6_nodes",
					   sizeof(struct fib6_node),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL, NULL);

	fib6_tables_init();
}
/* Module teardown: stop the GC timer before destroying the node cache. */
void fib6_gc_cleanup(void)
{
	del_timer(&ip6_fib_timer);
	kmem_cache_destroy(fib6_node_kmem);
}