net/ipv4/fib_semantics.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>

#include "fib_lookup.h"

static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static DEFINE_SPINLOCK(fib_multipath_lock);

#define for_nexthops(fi) {						\
        int nhsel; const struct fib_nh *nh;				\
        for (nhsel = 0, nh = (fi)->fib_nh;				\
             nhsel < (fi)->fib_nhs;					\
             nh++, nhsel++)

#define change_nexthops(fi) {						\
        int nhsel; struct fib_nh *nexthop_nh;				\
        for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
             nhsel < (fi)->fib_nhs;					\
             nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize it to get rid of the dummy loop */

#define for_nexthops(fi) {						\
        int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
        for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
        int nhsel;							\
        struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
        for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }

const struct fib_prop fib_props[RTN_MAX + 1] = {
        [RTN_UNSPEC] = {
                .error  = 0,
                .scope  = RT_SCOPE_NOWHERE,
        },
        [RTN_UNICAST] = {
                .error  = 0,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_LOCAL] = {
                .error  = 0,
                .scope  = RT_SCOPE_HOST,
        },
        [RTN_BROADCAST] = {
                .error  = 0,
                .scope  = RT_SCOPE_LINK,
        },
        [RTN_ANYCAST] = {
                .error  = 0,
                .scope  = RT_SCOPE_LINK,
        },
        [RTN_MULTICAST] = {
                .error  = 0,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_BLACKHOLE] = {
                .error  = -EINVAL,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_UNREACHABLE] = {
                .error  = -EHOSTUNREACH,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_PROHIBIT] = {
                .error  = -EACCES,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_THROW] = {
                .error  = -EAGAIN,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_NAT] = {
                .error  = -EINVAL,
                .scope  = RT_SCOPE_NOWHERE,
        },
        [RTN_XRESOLVE] = {
                .error  = -EINVAL,
                .scope  = RT_SCOPE_NOWHERE,
        },
};

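/* Free the per-nexthop exception table. Only called from the RCU free
 * path below, so plain (protection value 1) dereferences are safe.
 */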
static void free_nh_exceptions(struct fib_nh *nh)
{
        struct fnhe_hash_bucket *hash = nh->nh_exceptions;
        int i;

        for (i = 0; i < FNHE_HASH_SIZE; i++) {
                struct fib_nh_exception *fnhe;

                fnhe = rcu_dereference_protected(hash[i].chain, 1);
                while (fnhe) {
                        struct fib_nh_exception *next;

                        next = rcu_dereference_protected(fnhe->fnhe_next, 1);
                        kfree(fnhe);

                        fnhe = next;
                }
        }
        kfree(hash);
}

/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
        struct fib_info *fi = container_of(head, struct fib_info, rcu);

        change_nexthops(fi) {
                if (nexthop_nh->nh_dev)
                        dev_put(nexthop_nh->nh_dev);
                if (nexthop_nh->nh_exceptions)
                        free_nh_exceptions(nexthop_nh);
                if (nexthop_nh->nh_rth_output)
                        dst_free(&nexthop_nh->nh_rth_output->dst);
                if (nexthop_nh->nh_rth_input)
                        dst_free(&nexthop_nh->nh_rth_input->dst);
        } endfor_nexthops(fi);

        release_net(fi->fib_net);
        if (fi->fib_metrics != (u32 *) dst_default_metrics)
                kfree(fi->fib_metrics);
        kfree(fi);
}

void free_fib_info(struct fib_info *fi)
{
        if (fi->fib_dead == 0) {
                pr_warn("Freeing alive fib_info %p\n", fi);
                return;
        }
        fib_info_cnt--;
#ifdef CONFIG_IP_ROUTE_CLASSID
        change_nexthops(fi) {
                if (nexthop_nh->nh_tclassid)
                        fi->fib_net->ipv4.fib_num_tclassid_users--;
        } endfor_nexthops(fi);
#endif
        call_rcu(&fi->rcu, free_fib_info_rcu);
}

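/* Drop one tree reference. When the last one goes away, unlink the
 * fib_info from the hash tables, mark it dead and put a fib_clntref so
 * it can eventually be freed via RCU.
 */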
void fib_release_info(struct fib_info *fi)
{
        spin_lock_bh(&fib_info_lock);
        if (fi && --fi->fib_treeref == 0) {
                hlist_del(&fi->fib_hash);
                if (fi->fib_prefsrc)
                        hlist_del(&fi->fib_lhash);
                change_nexthops(fi) {
                        if (!nexthop_nh->nh_dev)
                                continue;
                        hlist_del(&nexthop_nh->nh_hash);
                } endfor_nexthops(fi)
                fi->fib_dead = 1;
                fib_info_put(fi);
        }
        spin_unlock_bh(&fib_info_lock);
}

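/* Compare the nexthop lists of two fib_infos. Returns 0 when every
 * nexthop matches (ignoring RTNH_F_DEAD), -1 otherwise.
 */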
static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
        const struct fib_nh *onh = ofi->fib_nh;

        for_nexthops(fi) {
                if (nh->nh_oif != onh->nh_oif ||
                    nh->nh_gw != onh->nh_gw ||
                    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
                    nh->nh_tclassid != onh->nh_tclassid ||
#endif
                    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
                        return -1;
                onh++;
        } endfor_nexthops(fi);
        return 0;
}

static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
        unsigned int mask = DEVINDEX_HASHSIZE - 1;

        return (val ^
                (val >> DEVINDEX_HASHBITS) ^
                (val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

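/* Hash a fib_info over the same fields that fib_find_info() compares,
 * so that equivalent entries end up in the same fib_info_hash chain.
 */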
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
        unsigned int mask = (fib_info_hash_size - 1);
        unsigned int val = fi->fib_nhs;

        val ^= (fi->fib_protocol << 8) | fi->fib_scope;
        val ^= (__force u32)fi->fib_prefsrc;
        val ^= fi->fib_priority;
        for_nexthops(fi) {
                val ^= fib_devindex_hashfn(nh->nh_oif);
        } endfor_nexthops(fi)

        return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}

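/* Look for an already installed fib_info equivalent to @nfi so that
 * routes with identical semantics can share a single refcounted copy.
 */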
static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct fib_info *fi;
        unsigned int hash;

        hash = fib_info_hashfn(nfi);
        head = &fib_info_hash[hash];

        hlist_for_each_entry(fi, node, head, fib_hash) {
                if (!net_eq(fi->fib_net, nfi->fib_net))
                        continue;
                if (fi->fib_nhs != nfi->fib_nhs)
                        continue;
                if (nfi->fib_protocol == fi->fib_protocol &&
                    nfi->fib_scope == fi->fib_scope &&
                    nfi->fib_prefsrc == fi->fib_prefsrc &&
                    nfi->fib_priority == fi->fib_priority &&
                    memcmp(nfi->fib_metrics, fi->fib_metrics,
                           sizeof(u32) * RTAX_MAX) == 0 &&
                    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
                    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
                        return fi;
        }

        return NULL;
}

/* Check that the gateway is already configured.
 * Used only by the redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct fib_nh *nh;
        unsigned int hash;

        spin_lock(&fib_info_lock);

        hash = fib_devindex_hashfn(dev->ifindex);
        head = &fib_info_devhash[hash];
        hlist_for_each_entry(nh, node, head, nh_hash) {
                if (nh->nh_dev == dev &&
                    nh->nh_gw == gw &&
                    !(nh->nh_flags & RTNH_F_DEAD)) {
                        spin_unlock(&fib_info_lock);
                        return 0;
                }
        }

        spin_unlock(&fib_info_lock);

        return -1;
}

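/* Worst-case netlink message size for a route built from this fib_info;
 * rtmsg_fib() uses it to size the notification skb.
 */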
static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
        size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
                         + nla_total_size(4) /* RTA_TABLE */
                         + nla_total_size(4) /* RTA_DST */
                         + nla_total_size(4) /* RTA_PRIORITY */
                         + nla_total_size(4); /* RTA_PREFSRC */

        /* space for nested metrics */
        payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

        if (fi->fib_nhs) {
                /* Also handles the special case fib_nhs == 1 */

                /* each nexthop is packed in an attribute */
                size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

                /* may contain flow and gateway attribute */
                nhsize += 2 * nla_total_size(4);

                /* all nexthops are packed in a nested attribute */
                payload += nla_total_size(fi->fib_nhs * nhsize);
        }

        return payload;
}

void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
               int dst_len, u32 tb_id, struct nl_info *info,
               unsigned int nlm_flags)
{
        struct sk_buff *skb;
        u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
        int err = -ENOBUFS;

        skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = fib_dump_info(skb, info->pid, seq, event, tb_id,
                            fa->fa_type, key, dst_len,
                            fa->fa_tos, fa->fa_info, nlm_flags);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }
        rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
                    info->nlh, GFP_KERNEL);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}

/* Return the first fib alias matching TOS with
 * priority less than or equal to PRIO.
 */
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
{
        if (fah) {
                struct fib_alias *fa;
                list_for_each_entry(fa, fah, fa_list) {
                        if (fa->fa_tos > tos)
                                continue;
                        if (fa->fa_info->fib_priority >= prio ||
                            fa->fa_tos < tos)
                                return fa;
                }
        }
        return NULL;
}

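/* Use the neighbour (ARP) state of the first nexthop gateway to guess
 * whether a candidate default route is dead. Returns 0 if it still looks
 * usable, 1 otherwise, remembering the best fallback seen so far in
 * *last_resort / *last_idx.
 */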
int fib_detect_death(struct fib_info *fi, int order,
                     struct fib_info **last_resort, int *last_idx, int dflt)
{
        struct neighbour *n;
        int state = NUD_NONE;

        n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
        if (n) {
                state = n->nud_state;
                neigh_release(n);
        }
        if (state == NUD_REACHABLE)
                return 0;
        if ((state & NUD_VALID) && order != dflt)
                return 0;
        if ((state & NUD_VALID) ||
            (*last_idx < 0 && order > dflt)) {
                *last_resort = fi;
                *last_idx = order;
        }
        return 1;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
        int nhs = 0;

        while (rtnh_ok(rtnh, remaining)) {
                nhs++;
                rtnh = rtnh_next(rtnh, &remaining);
        }

        /* leftover implies invalid nexthop configuration, discard it */
        return remaining > 0 ? 0 : nhs;
}

static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                       int remaining, struct fib_config *cfg)
{
        change_nexthops(fi) {
                int attrlen;

                if (!rtnh_ok(rtnh, remaining))
                        return -EINVAL;

                nexthop_nh->nh_flags =
                        (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
                nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
                nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

                attrlen = rtnh_attrlen(rtnh);
                if (attrlen > 0) {
                        struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
                        nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
                        if (nexthop_nh->nh_tclassid)
                                fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
                }

                rtnh = rtnh_next(rtnh, &remaining);
        } endfor_nexthops(fi);

        return 0;
}

#endif

int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
        struct rtnexthop *rtnh;
        int remaining;
#endif

        if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
                return 1;

        if (cfg->fc_oif || cfg->fc_gw) {
                if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
                    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
                        return 0;
                return 1;
        }

#ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (cfg->fc_mp == NULL)
                return 0;

        rtnh = cfg->fc_mp;
        remaining = cfg->fc_mp_len;

        for_nexthops(fi) {
                int attrlen;

                if (!rtnh_ok(rtnh, remaining))
                        return -EINVAL;

                if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
                        return 1;

                attrlen = rtnh_attrlen(rtnh);
                if (attrlen > 0) {
                        struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        if (nla && nla_get_be32(nla) != nh->nh_gw)
                                return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
                        if (nla && nla_get_u32(nla) != nh->nh_tclassid)
                                return 1;
#endif
                }

                rtnh = rtnh_next(rtnh, &remaining);
        } endfor_nexthops(fi);
#endif
        return 0;
}

/*
 * Picture
 * -------
 *
 * The semantics of nexthops are messy for historical reasons.
 * We have to take into account that:
 * a) the gateway can actually be a local interface address,
 *    so that the gatewayed route is direct.
 * b) the gateway must be an on-link address, possibly
 *    described not by an ifaddr, but also by a direct route.
 * c) if both gateway and interface are specified, they should not
 *    contradict each other.
 * d) if we use tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the amount of code barely
 * increases, but the result is much more general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or when the "nexthop" is declared ONLINK,
 * which means that gw is forced to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g. as a by-product it allows
 * independent exterior and interior routing processes to coexist
 * in peace.
 *
 * Normally it looks like this:
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *                |
 *                |-> {link prefix} -> (gw, oif) [scope local]
 *                                      |
 *                                      |-> {local prefix} (terminal node)
 */

static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                        struct fib_nh *nh)
{
        int err;
        struct net *net;
        struct net_device *dev;

        net = cfg->fc_nlinfo.nl_net;
        if (nh->nh_gw) {
                struct fib_result res;

                if (nh->nh_flags & RTNH_F_ONLINK) {

                        if (cfg->fc_scope >= RT_SCOPE_LINK)
                                return -EINVAL;
                        if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
                                return -EINVAL;
                        dev = __dev_get_by_index(net, nh->nh_oif);
                        if (!dev)
                                return -ENODEV;
                        if (!(dev->flags & IFF_UP))
                                return -ENETDOWN;
                        nh->nh_dev = dev;
                        dev_hold(dev);
                        nh->nh_scope = RT_SCOPE_LINK;
                        return 0;
                }
                rcu_read_lock();
                {
                        struct flowi4 fl4 = {
                                .daddr = nh->nh_gw,
                                .flowi4_scope = cfg->fc_scope + 1,
                                .flowi4_oif = nh->nh_oif,
                        };

                        /* It is not necessary, but requires a bit of thinking */
                        if (fl4.flowi4_scope < RT_SCOPE_LINK)
                                fl4.flowi4_scope = RT_SCOPE_LINK;
                        err = fib_lookup(net, &fl4, &res);
                        if (err) {
                                rcu_read_unlock();
                                return err;
                        }
                }
                err = -EINVAL;
                if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
                        goto out;
                nh->nh_scope = res.scope;
                nh->nh_oif = FIB_RES_OIF(res);
                nh->nh_dev = dev = FIB_RES_DEV(res);
                if (!dev)
                        goto out;
                dev_hold(dev);
                err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
        } else {
                struct in_device *in_dev;

                if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
                        return -EINVAL;

                rcu_read_lock();
                err = -ENODEV;
                in_dev = inetdev_by_index(net, nh->nh_oif);
                if (in_dev == NULL)
                        goto out;
                err = -ENETDOWN;
                if (!(in_dev->dev->flags & IFF_UP))
                        goto out;
                nh->nh_dev = in_dev->dev;
                dev_hold(nh->nh_dev);
                nh->nh_scope = RT_SCOPE_HOST;
                err = 0;
        }
out:
        rcu_read_unlock();
        return err;
}

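/* Hash a preferred source address into fib_info_laddrhash; used by
 * fib_sync_down_addr() to find routes whose prefsrc disappeared.
 */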
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
        unsigned int mask = (fib_info_hash_size - 1);

        return ((__force u32)val ^
                ((__force u32)val >> 7) ^
                ((__force u32)val >> 14)) & mask;
}

static struct hlist_head *fib_info_hash_alloc(int bytes)
{
        if (bytes <= PAGE_SIZE)
                return kzalloc(bytes, GFP_KERNEL);
        else
                return (struct hlist_head *)
                        __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                         get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
        if (!hash)
                return;

        if (bytes <= PAGE_SIZE)
                kfree(hash);
        else
                free_pages((unsigned long) hash, get_order(bytes));
}

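/* Rehash every fib_info into freshly allocated, larger tables. Called
 * from fib_create_info() when fib_info_cnt outgrows fib_info_hash_size.
 */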
static void fib_info_hash_move(struct hlist_head *new_info_hash,
                               struct hlist_head *new_laddrhash,
                               unsigned int new_size)
{
        struct hlist_head *old_info_hash, *old_laddrhash;
        unsigned int old_size = fib_info_hash_size;
        unsigned int i, bytes;

        spin_lock_bh(&fib_info_lock);
        old_info_hash = fib_info_hash;
        old_laddrhash = fib_info_laddrhash;
        fib_info_hash_size = new_size;

        for (i = 0; i < old_size; i++) {
                struct hlist_head *head = &fib_info_hash[i];
                struct hlist_node *node, *n;
                struct fib_info *fi;

                hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
                        struct hlist_head *dest;
                        unsigned int new_hash;

                        hlist_del(&fi->fib_hash);

                        new_hash = fib_info_hashfn(fi);
                        dest = &new_info_hash[new_hash];
                        hlist_add_head(&fi->fib_hash, dest);
                }
        }
        fib_info_hash = new_info_hash;

        for (i = 0; i < old_size; i++) {
                struct hlist_head *lhead = &fib_info_laddrhash[i];
                struct hlist_node *node, *n;
                struct fib_info *fi;

                hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
                        struct hlist_head *ldest;
                        unsigned int new_hash;

                        hlist_del(&fi->fib_lhash);

                        new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
                        ldest = &new_laddrhash[new_hash];
                        hlist_add_head(&fi->fib_lhash, ldest);
                }
        }
        fib_info_laddrhash = new_laddrhash;

        spin_unlock_bh(&fib_info_lock);

        bytes = old_size * sizeof(struct hlist_head *);
        fib_info_hash_free(old_info_hash, bytes);
        fib_info_hash_free(old_laddrhash, bytes);
}

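/* Refresh the cached source address for a nexthop and stamp it with the
 * current device address generation id, so stale values can be detected.
 */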
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
        nh->nh_saddr = inet_select_addr(nh->nh_dev,
                                        nh->nh_gw,
                                        nh->nh_parent->fib_scope);
        nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

        return nh->nh_saddr;
}

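/* Build a fib_info (and its nexthops) from a netlink route request.
 * If an equivalent fib_info already exists, it is reused instead.
 */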
struct fib_info *fib_create_info(struct fib_config *cfg)
{
        int err;
        struct fib_info *fi = NULL;
        struct fib_info *ofi;
        int nhs = 1;
        struct net *net = cfg->fc_nlinfo.nl_net;

        if (cfg->fc_type > RTN_MAX)
                goto err_inval;

        /* Fast check to catch the most weird cases */
        if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
                goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (cfg->fc_mp) {
                nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
                if (nhs == 0)
                        goto err_inval;
        }
#endif

        err = -ENOBUFS;
        if (fib_info_cnt >= fib_info_hash_size) {
                unsigned int new_size = fib_info_hash_size << 1;
                struct hlist_head *new_info_hash;
                struct hlist_head *new_laddrhash;
                unsigned int bytes;

                if (!new_size)
                        new_size = 1;
                bytes = new_size * sizeof(struct hlist_head *);
                new_info_hash = fib_info_hash_alloc(bytes);
                new_laddrhash = fib_info_hash_alloc(bytes);
                if (!new_info_hash || !new_laddrhash) {
                        fib_info_hash_free(new_info_hash, bytes);
                        fib_info_hash_free(new_laddrhash, bytes);
                } else
                        fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

                if (!fib_info_hash_size)
                        goto failure;
        }

        fi = kzalloc(sizeof(*fi) + nhs * sizeof(struct fib_nh), GFP_KERNEL);
        if (fi == NULL)
                goto failure;
        if (cfg->fc_mx) {
                fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
                if (!fi->fib_metrics)
                        goto failure;
        } else
                fi->fib_metrics = (u32 *) dst_default_metrics;
        fib_info_cnt++;

        fi->fib_net = hold_net(net);
        fi->fib_protocol = cfg->fc_protocol;
        fi->fib_scope = cfg->fc_scope;
        fi->fib_flags = cfg->fc_flags;
        fi->fib_priority = cfg->fc_priority;
        fi->fib_prefsrc = cfg->fc_prefsrc;

        fi->fib_nhs = nhs;
        change_nexthops(fi) {
                nexthop_nh->nh_parent = fi;
        } endfor_nexthops(fi)

        if (cfg->fc_mx) {
                struct nlattr *nla;
                int remaining;

                nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
                        int type = nla_type(nla);

                        if (type) {
                                u32 val;

                                if (type > RTAX_MAX)
                                        goto err_inval;
                                val = nla_get_u32(nla);
                                if (type == RTAX_ADVMSS && val > 65535 - 40)
                                        val = 65535 - 40;
                                if (type == RTAX_MTU && val > 65535 - 15)
                                        val = 65535 - 15;
                                fi->fib_metrics[type - 1] = val;
                        }
                }
        }

        if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
                if (err != 0)
                        goto failure;
                if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
                        goto err_inval;
                if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
                        goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
                if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
                        goto err_inval;
#endif
#else
                goto err_inval;
#endif
        } else {
                struct fib_nh *nh = fi->fib_nh;

                nh->nh_oif = cfg->fc_oif;
                nh->nh_gw = cfg->fc_gw;
                nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
                nh->nh_tclassid = cfg->fc_flow;
                if (nh->nh_tclassid)
                        fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                nh->nh_weight = 1;
#endif
        }

        if (fib_props[cfg->fc_type].error) {
                if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
                        goto err_inval;
                goto link_it;
        } else {
                switch (cfg->fc_type) {
                case RTN_UNICAST:
                case RTN_LOCAL:
                case RTN_BROADCAST:
                case RTN_ANYCAST:
                case RTN_MULTICAST:
                        break;
                default:
                        goto err_inval;
                }
        }

        if (cfg->fc_scope > RT_SCOPE_HOST)
                goto err_inval;

        if (cfg->fc_scope == RT_SCOPE_HOST) {
                struct fib_nh *nh = fi->fib_nh;

                /* Local address is added. */
                if (nhs != 1 || nh->nh_gw)
                        goto err_inval;
                nh->nh_scope = RT_SCOPE_NOWHERE;
                nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
                err = -ENODEV;
                if (nh->nh_dev == NULL)
                        goto failure;
        } else {
                change_nexthops(fi) {
                        err = fib_check_nh(cfg, fi, nexthop_nh);
                        if (err != 0)
                                goto failure;
                } endfor_nexthops(fi)
        }

        if (fi->fib_prefsrc) {
                if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
                    fi->fib_prefsrc != cfg->fc_dst)
                        if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
                                goto err_inval;
        }

        change_nexthops(fi) {
                fib_info_update_nh_saddr(net, nexthop_nh);
        } endfor_nexthops(fi)

link_it:
        ofi = fib_find_info(fi);
        if (ofi) {
                fi->fib_dead = 1;
                free_fib_info(fi);
                ofi->fib_treeref++;
                return ofi;
        }

        fi->fib_treeref++;
        atomic_inc(&fi->fib_clntref);
        spin_lock_bh(&fib_info_lock);
        hlist_add_head(&fi->fib_hash,
                       &fib_info_hash[fib_info_hashfn(fi)]);
        if (fi->fib_prefsrc) {
                struct hlist_head *head;

                head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
                hlist_add_head(&fi->fib_lhash, head);
        }
        change_nexthops(fi) {
                struct hlist_head *head;
                unsigned int hash;

                if (!nexthop_nh->nh_dev)
                        continue;
                hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
                head = &fib_info_devhash[hash];
                hlist_add_head(&nexthop_nh->nh_hash, head);
        } endfor_nexthops(fi)
        spin_unlock_bh(&fib_info_lock);
        return fi;

err_inval:
        err = -EINVAL;

failure:
        if (fi) {
                fi->fib_dead = 1;
                free_fib_info(fi);
        }

        return ERR_PTR(err);
}

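/* Fill @skb with a netlink route message (the @event passed by the
 * caller) describing this route and fib_info, including the nested
 * multipath attribute when there is more than one nexthop.
 */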
int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
                  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
                  struct fib_info *fi, unsigned int flags)
{
        struct nlmsghdr *nlh;
        struct rtmsg *rtm;

        nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        rtm = nlmsg_data(nlh);
        rtm->rtm_family = AF_INET;
        rtm->rtm_dst_len = dst_len;
        rtm->rtm_src_len = 0;
        rtm->rtm_tos = tos;
        if (tb_id < 256)
                rtm->rtm_table = tb_id;
        else
                rtm->rtm_table = RT_TABLE_COMPAT;
        if (nla_put_u32(skb, RTA_TABLE, tb_id))
                goto nla_put_failure;
        rtm->rtm_type = type;
        rtm->rtm_flags = fi->fib_flags;
        rtm->rtm_scope = fi->fib_scope;
        rtm->rtm_protocol = fi->fib_protocol;

        if (rtm->rtm_dst_len &&
            nla_put_be32(skb, RTA_DST, dst))
                goto nla_put_failure;
        if (fi->fib_priority &&
            nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
                goto nla_put_failure;
        if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
                goto nla_put_failure;

        if (fi->fib_prefsrc &&
            nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
                goto nla_put_failure;
        if (fi->fib_nhs == 1) {
                if (fi->fib_nh->nh_gw &&
                    nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
                        goto nla_put_failure;
                if (fi->fib_nh->nh_oif &&
                    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
                        goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
                if (fi->fib_nh[0].nh_tclassid &&
                    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
                        goto nla_put_failure;
#endif
        }
#ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (fi->fib_nhs > 1) {
                struct rtnexthop *rtnh;
                struct nlattr *mp;

                mp = nla_nest_start(skb, RTA_MULTIPATH);
                if (mp == NULL)
                        goto nla_put_failure;

                for_nexthops(fi) {
                        rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
                        if (rtnh == NULL)
                                goto nla_put_failure;

                        rtnh->rtnh_flags = nh->nh_flags & 0xFF;
                        rtnh->rtnh_hops = nh->nh_weight - 1;
                        rtnh->rtnh_ifindex = nh->nh_oif;

                        if (nh->nh_gw &&
                            nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
                                goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
                        if (nh->nh_tclassid &&
                            nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
                                goto nla_put_failure;
#endif
                        /* length of rtnetlink header + attributes */
                        rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
                } endfor_nexthops(fi);

                nla_nest_end(skb, mp);
        }
#endif
        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

/*
 * Update FIB if:
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shut down all nexthops going via it.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
{
        int ret = 0;
        unsigned int hash = fib_laddr_hashfn(local);
        struct hlist_head *head = &fib_info_laddrhash[hash];
        struct hlist_node *node;
        struct fib_info *fi;

        if (fib_info_laddrhash == NULL || local == 0)
                return 0;

        hlist_for_each_entry(fi, node, head, fib_lhash) {
                if (!net_eq(fi->fib_net, net))
                        continue;
                if (fi->fib_prefsrc == local) {
                        fi->fib_flags |= RTNH_F_DEAD;
                        ret++;
                }
        }
        return ret;
}

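/* Mark dead every nexthop that goes through @dev (with @force the scope
 * check is skipped, and force > 1 brings the whole fib_info down as soon
 * as one of its nexthops uses @dev). Returns the number of fib_infos
 * whose nexthops all became dead.
 */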
int fib_sync_down_dev(struct net_device *dev, int force)
{
        int ret = 0;
        int scope = RT_SCOPE_NOWHERE;
        struct fib_info *prev_fi = NULL;
        unsigned int hash = fib_devindex_hashfn(dev->ifindex);
        struct hlist_head *head = &fib_info_devhash[hash];
        struct hlist_node *node;
        struct fib_nh *nh;

        if (force)
                scope = -1;

        hlist_for_each_entry(nh, node, head, nh_hash) {
                struct fib_info *fi = nh->nh_parent;
                int dead;

                BUG_ON(!fi->fib_nhs);
                if (nh->nh_dev != dev || fi == prev_fi)
                        continue;
                prev_fi = fi;
                dead = 0;
                change_nexthops(fi) {
                        if (nexthop_nh->nh_flags & RTNH_F_DEAD)
                                dead++;
                        else if (nexthop_nh->nh_dev == dev &&
                                 nexthop_nh->nh_scope != scope) {
                                nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                                spin_lock_bh(&fib_multipath_lock);
                                fi->fib_power -= nexthop_nh->nh_power;
                                nexthop_nh->nh_power = 0;
                                spin_unlock_bh(&fib_multipath_lock);
#endif
                                dead++;
                        }
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                        if (force > 1 && nexthop_nh->nh_dev == dev) {
                                dead = fi->fib_nhs;
                                break;
                        }
#endif
                } endfor_nexthops(fi)
                if (dead == fi->fib_nhs) {
                        fi->fib_flags |= RTNH_F_DEAD;
                        ret++;
                }
        }

        return ret;
}

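/* Choose among several candidate default routes of equal specificity,
 * preferring the first one whose gateway still looks alive according to
 * fib_detect_death().
 */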
/* Must be invoked inside of an RCU protected region. */
void fib_select_default(struct fib_result *res)
{
        struct fib_info *fi = NULL, *last_resort = NULL;
        struct list_head *fa_head = res->fa_head;
        struct fib_table *tb = res->table;
        int order = -1, last_idx = -1;
        struct fib_alias *fa;

        list_for_each_entry_rcu(fa, fa_head, fa_list) {
                struct fib_info *next_fi = fa->fa_info;

                if (next_fi->fib_scope != res->scope ||
                    fa->fa_type != RTN_UNICAST)
                        continue;

                if (next_fi->fib_priority > res->fi->fib_priority)
                        break;
                if (!next_fi->fib_nh[0].nh_gw ||
                    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
                        continue;

                fib_alias_accessed(fa);

                if (fi == NULL) {
                        if (next_fi != res->fi)
                                break;
                } else if (!fib_detect_death(fi, order, &last_resort,
                                             &last_idx, tb->tb_default)) {
                        fib_result_assign(res, fi);
                        tb->tb_default = order;
                        goto out;
                }
                fi = next_fi;
                order++;
        }

        if (order <= 0 || fi == NULL) {
                tb->tb_default = -1;
                goto out;
        }

        if (!fib_detect_death(fi, order, &last_resort, &last_idx,
                              tb->tb_default)) {
                fib_result_assign(res, fi);
                tb->tb_default = order;
                goto out;
        }

        if (last_idx >= 0)
                fib_result_assign(res, last_resort);
        tb->tb_default = last_idx;
out:
        return;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * Dead device goes up. We wake up dead nexthops.
 * It makes sense only for multipath routes.
 */
int fib_sync_up(struct net_device *dev)
{
        struct fib_info *prev_fi;
        unsigned int hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct fib_nh *nh;
        int ret;

        if (!(dev->flags & IFF_UP))
                return 0;

        prev_fi = NULL;
        hash = fib_devindex_hashfn(dev->ifindex);
        head = &fib_info_devhash[hash];
        ret = 0;

        hlist_for_each_entry(nh, node, head, nh_hash) {
                struct fib_info *fi = nh->nh_parent;
                int alive;

                BUG_ON(!fi->fib_nhs);
                if (nh->nh_dev != dev || fi == prev_fi)
                        continue;

                prev_fi = fi;
                alive = 0;
                change_nexthops(fi) {
                        if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
                                alive++;
                                continue;
                        }
                        if (nexthop_nh->nh_dev == NULL ||
                            !(nexthop_nh->nh_dev->flags & IFF_UP))
                                continue;
                        if (nexthop_nh->nh_dev != dev ||
                            !__in_dev_get_rtnl(dev))
                                continue;
                        alive++;
                        spin_lock_bh(&fib_multipath_lock);
                        nexthop_nh->nh_power = 0;
                        nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
                        spin_unlock_bh(&fib_multipath_lock);
                } endfor_nexthops(fi)

                if (alive > 0) {
                        fi->fib_flags &= ~RTNH_F_DEAD;
                        ret++;
                }
        }

        return ret;
}

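/* Weighted nexthop selection: fib_power is periodically recharged to the
 * sum of the weights of all alive nexthops, and every selection debits
 * the chosen nexthop's nh_power, so over time traffic follows the
 * configured weights.
 */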
/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
 */
void fib_select_multipath(struct fib_result *res)
{
        struct fib_info *fi = res->fi;
        int w;

        spin_lock_bh(&fib_multipath_lock);
        if (fi->fib_power <= 0) {
                int power = 0;
                change_nexthops(fi) {
                        if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
                                power += nexthop_nh->nh_weight;
                                nexthop_nh->nh_power = nexthop_nh->nh_weight;
                        }
                } endfor_nexthops(fi);
                fi->fib_power = power;
                if (power <= 0) {
                        spin_unlock_bh(&fib_multipath_lock);
                        /* Race condition: route has just become dead. */
                        res->nh_sel = 0;
                        return;
                }
        }

        /* w should be a random number in [0..fi->fib_power-1];
         * jiffies is a pretty bad approximation.
         */
        w = jiffies % fi->fib_power;

        change_nexthops(fi) {
                if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
                    nexthop_nh->nh_power) {
                        w -= nexthop_nh->nh_power;
                        if (w <= 0) {
                                nexthop_nh->nh_power--;
                                fi->fib_power--;
                                res->nh_sel = nhsel;
                                spin_unlock_bh(&fib_multipath_lock);
                                return;
                        }
                }
        } endfor_nexthops(fi);

        /* Race condition: route has just become dead. */
        res->nh_sel = 0;
        spin_unlock_bh(&fib_multipath_lock);
}
#endif