ipv4: Use flowi4 in FIB layer.
net/ipv4/fib_frontend.c
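The FIB frontend now builds its lookup keys directly as struct flowi4: callers fill in daddr, saddr, flowi4_tos, flowi4_scope, flowi4_oif, flowi4_iif and flowi4_mark and hand the key to fib_lookup() / fib_table_lookup(). A minimal sketch of the pattern used throughout the file (names taken from __inet_dev_addr_type() below):

    struct flowi4 fl4 = { .daddr = addr };
    struct fib_result res;

    if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF))
        /* res.type, res.prefixlen, ... describe the matched route */;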
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: FIB frontend.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
#ifndef CONFIG_IP_MULTIPLE_TABLES

static int __net_init fib4_rules_init(struct net *net)
{
	struct fib_table *local_table, *main_table;

	local_table = fib_trie_table(RT_TABLE_LOCAL);
	if (local_table == NULL)
		return -ENOMEM;

	main_table = fib_trie_table(RT_TABLE_MAIN);
	if (main_table == NULL)
		goto fail;

	hlist_add_head_rcu(&local_table->tb_hlist,
				&net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
	hlist_add_head_rcu(&main_table->tb_hlist,
				&net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
	return 0;

fail:
	kfree(local_table);
	return -ENOMEM;
}
#else
struct fib_table *fib_new_table(struct net *net, u32 id)
{
	struct fib_table *tb;
	unsigned int h;

	if (id == 0)
		id = RT_TABLE_MAIN;
	tb = fib_get_table(net, id);
	if (tb)
		return tb;

	tb = fib_trie_table(id);
	if (!tb)
		return NULL;
	h = id & (FIB_TABLE_HASHSZ - 1);
	hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
	return tb;
}
struct fib_table *fib_get_table(struct net *net, u32 id)
{
	struct fib_table *tb;
	struct hlist_node *node;
	struct hlist_head *head;
	unsigned int h;

	if (id == 0)
		id = RT_TABLE_MAIN;
	h = id & (FIB_TABLE_HASHSZ - 1);

	rcu_read_lock();
	head = &net->ipv4.fib_table_hash[h];
	hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
		if (tb->tb_id == id) {
			rcu_read_unlock();
			return tb;
		}
	}
	rcu_read_unlock();
	return NULL;
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
static void fib_flush(struct net *net)
{
	int flushed = 0;
	struct fib_table *tb;
	struct hlist_node *node;
	struct hlist_head *head;
	unsigned int h;

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry(tb, node, head, tb_hlist)
			flushed += fib_table_flush(tb);
	}

	if (flushed)
		rt_cache_flush(net, -1);
}
/*
 * Find the address type as if only "dev" was present in the system. If
 * dev is NULL then all interfaces are taken into consideration.
 */
static inline unsigned __inet_dev_addr_type(struct net *net,
					    const struct net_device *dev,
					    __be32 addr)
{
	struct flowi4		fl4 = { .daddr = addr };
	struct fib_result	res;
	unsigned ret = RTN_BROADCAST;
	struct fib_table *local_table;

	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
		return RTN_BROADCAST;
	if (ipv4_is_multicast(addr))
		return RTN_MULTICAST;

#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r = NULL;
#endif

	local_table = fib_get_table(net, RT_TABLE_LOCAL);
	if (local_table) {
		ret = RTN_UNICAST;
		rcu_read_lock();
		if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) {
			if (!dev || dev == res.fi->fib_dev)
				ret = res.type;
		}
		rcu_read_unlock();
	}
	return ret;
}
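/*
 * inet_addr_type() and inet_dev_addr_type() classify an IPv4 address
 * (RTN_LOCAL, RTN_BROADCAST, RTN_MULTICAST or RTN_UNICAST) by looking it
 * up in the LOCAL table via the helper above; the _dev_ variant only
 * accepts a match whose fib_info points at the given device.
 */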
unsigned int inet_addr_type(struct net *net, __be32 addr)
{
	return __inet_dev_addr_type(net, NULL, addr);
}
EXPORT_SYMBOL(inet_addr_type);

unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
				__be32 addr)
{
	return __inet_dev_addr_type(net, dev, addr);
}
EXPORT_SYMBOL(inet_dev_addr_type);
/* Given (packet source, input interface) and optional (dst, oif, tos):
 * - (main) check that the source is valid, i.e. not broadcast or one of
 *   our local addresses.
 * - figure out what "logical" interface this packet arrived on
 *   and calculate the "specific destination" address.
 * - check that the packet arrived from the expected physical interface.
 * Called with rcu_read_lock().
 */
int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
			struct net_device *dev, __be32 *spec_dst,
			u32 *itag, u32 mark)
{
	struct in_device *in_dev;
	struct flowi4 fl4;
	struct fib_result res;
	int no_addr, rpf, accept_local;
	bool dev_match;
	int ret;
	struct net *net;

	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = oif;
	fl4.flowi4_mark = mark;
	fl4.daddr = src;
	fl4.saddr = dst;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;

	no_addr = rpf = accept_local = 0;
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		no_addr = in_dev->ifa_list == NULL;
		rpf = IN_DEV_RPFILTER(in_dev);
		accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
		if (mark && !IN_DEV_SRC_VMARK(in_dev))
			fl4.flowi4_mark = 0;
	}

	if (in_dev == NULL)
		goto e_inval;

	net = dev_net(dev);
	if (fib_lookup(net, &fl4, &res))
		goto last_resort;
	if (res.type != RTN_UNICAST) {
		if (res.type != RTN_LOCAL || !accept_local)
			goto e_inval;
	}
	*spec_dst = FIB_RES_PREFSRC(res);
	fib_combine_itag(itag, &res);
	dev_match = false;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	for (ret = 0; ret < res.fi->fib_nhs; ret++) {
		struct fib_nh *nh = &res.fi->fib_nh[ret];

		if (nh->nh_dev == dev) {
			dev_match = true;
			break;
		}
	}
#else
	if (FIB_RES_DEV(res) == dev)
		dev_match = true;
#endif
	if (dev_match) {
		ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
		return ret;
	}
	if (no_addr)
		goto last_resort;
	if (rpf == 1)
		goto e_rpf;
	fl4.flowi4_oif = dev->ifindex;

	ret = 0;
	if (fib_lookup(net, &fl4, &res) == 0) {
		if (res.type == RTN_UNICAST) {
			*spec_dst = FIB_RES_PREFSRC(res);
			ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
		}
	}
	return ret;

last_resort:
	if (rpf)
		goto e_rpf;
	*spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
	*itag = 0;
	return 0;

e_inval:
	return -EINVAL;
e_rpf:
	return -EXDEV;
}
static inline __be32 sk_extract_addr(struct sockaddr *addr)
{
	return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
}

static int put_rtax(struct nlattr *mx, int len, int type, u32 value)
{
	struct nlattr *nla;

	nla = (struct nlattr *) ((char *) mx + len);
	nla->nla_type = type;
	nla->nla_len = nla_attr_size(4);
	*(u32 *) nla_data(nla) = value;

	return len + nla_total_size(4);
}
static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
				 struct fib_config *cfg)
{
	__be32 addr;
	int plen;

	memset(cfg, 0, sizeof(*cfg));
	cfg->fc_nlinfo.nl_net = net;

	if (rt->rt_dst.sa_family != AF_INET)
		return -EAFNOSUPPORT;

	/*
	 * Check mask for validity:
	 * a) it must be contiguous.
	 * b) destination must have all host bits clear.
	 * c) if application forgot to set correct family (AF_INET),
	 *    reject request unless it is absolutely clear i.e.
	 *    both family and mask are zero.
	 */
	plen = 32;
	addr = sk_extract_addr(&rt->rt_dst);
	if (!(rt->rt_flags & RTF_HOST)) {
		__be32 mask = sk_extract_addr(&rt->rt_genmask);

		if (rt->rt_genmask.sa_family != AF_INET) {
			if (mask || rt->rt_genmask.sa_family)
				return -EAFNOSUPPORT;
		}

		if (bad_mask(mask, addr))
			return -EINVAL;

		plen = inet_mask_len(mask);
	}

	cfg->fc_dst_len = plen;
	cfg->fc_dst = addr;

	if (cmd != SIOCDELRT) {
		cfg->fc_nlflags = NLM_F_CREATE;
		cfg->fc_protocol = RTPROT_BOOT;
	}

	if (rt->rt_metric)
		cfg->fc_priority = rt->rt_metric - 1;

	if (rt->rt_flags & RTF_REJECT) {
		cfg->fc_scope = RT_SCOPE_HOST;
		cfg->fc_type = RTN_UNREACHABLE;
		return 0;
	}

	cfg->fc_scope = RT_SCOPE_NOWHERE;
	cfg->fc_type = RTN_UNICAST;

	if (rt->rt_dev) {
		char *colon;
		struct net_device *dev;
		char devname[IFNAMSIZ];

		if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1))
			return -EFAULT;

		devname[IFNAMSIZ-1] = 0;
		colon = strchr(devname, ':');
		if (colon)
			*colon = 0;
		dev = __dev_get_by_name(net, devname);
		if (!dev)
			return -ENODEV;
		cfg->fc_oif = dev->ifindex;
		if (colon) {
			struct in_ifaddr *ifa;
			struct in_device *in_dev = __in_dev_get_rtnl(dev);
			if (!in_dev)
				return -ENODEV;
			*colon = ':';
			for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
				if (strcmp(ifa->ifa_label, devname) == 0)
					break;
			if (ifa == NULL)
				return -ENODEV;
			cfg->fc_prefsrc = ifa->ifa_local;
		}
	}

	addr = sk_extract_addr(&rt->rt_gateway);
	if (rt->rt_gateway.sa_family == AF_INET && addr) {
		cfg->fc_gw = addr;
		if (rt->rt_flags & RTF_GATEWAY &&
		    inet_addr_type(net, addr) == RTN_UNICAST)
			cfg->fc_scope = RT_SCOPE_UNIVERSE;
	}

	if (cmd == SIOCDELRT)
		return 0;

	if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw)
		return -EINVAL;

	if (cfg->fc_scope == RT_SCOPE_NOWHERE)
		cfg->fc_scope = RT_SCOPE_LINK;

	if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) {
		struct nlattr *mx;
		int len = 0;

		mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
		if (mx == NULL)
			return -ENOMEM;

		if (rt->rt_flags & RTF_MTU)
			len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40);

		if (rt->rt_flags & RTF_WINDOW)
			len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window);

		if (rt->rt_flags & RTF_IRTT)
			len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3);

		cfg->fc_mx = mx;
		cfg->fc_mx_len = len;
	}

	return 0;
}
/*
 * Handle IP routing ioctl calls.
 * These are used to manipulate the routing tables.
 */
int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib_config cfg;
	struct rtentry rt;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&rt, arg, sizeof(rt)))
			return -EFAULT;

		rtnl_lock();
		err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
		if (err == 0) {
			struct fib_table *tb;

			if (cmd == SIOCDELRT) {
				tb = fib_get_table(net, cfg.fc_table);
				if (tb)
					err = fib_table_delete(tb, &cfg);
				else
					err = -ESRCH;
			} else {
				tb = fib_new_table(net, cfg.fc_table);
				if (tb)
					err = fib_table_insert(tb, &cfg);
				else
					err = -ENOBUFS;
			}

			/* allocated by rtentry_to_fib_config() */
			kfree(cfg.fc_mx);
		}
		rtnl_unlock();
		return err;
	}
	return -EINVAL;
}
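/*
 * For reference, the legacy interface above is driven from userspace
 * roughly like this (a sketch, not part of the kernel sources): fill a
 * struct rtentry whose rt_dst/rt_genmask/rt_gateway members are
 * sockaddr_in values and issue the ioctl on any AF_INET socket, e.g.
 *
 *	struct rtentry rt = { .rt_flags = RTF_UP | RTF_GATEWAY };
 *	((struct sockaddr_in *)&rt.rt_dst)->sin_family = AF_INET;
 *	((struct sockaddr_in *)&rt.rt_gateway)->sin_family = AF_INET;
 *	((struct sockaddr_in *)&rt.rt_genmask)->sin_family = AF_INET;
 *	ioctl(fd, SIOCADDRT, &rt);
 *
 * rtentry_to_fib_config() converts that request into a struct fib_config
 * before it reaches the fib_table_*() calls.
 */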
const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
	[RTA_DST]		= { .type = NLA_U32 },
	[RTA_SRC]		= { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_GATEWAY]		= { .type = NLA_U32 },
	[RTA_PRIORITY]		= { .type = NLA_U32 },
	[RTA_PREFSRC]		= { .type = NLA_U32 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_FLOW]		= { .type = NLA_U32 },
};
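/*
 * rtm_to_fib_config() below translates an RTM_NEWROUTE/RTM_DELROUTE
 * netlink message (struct rtmsg header plus RTA_* attributes validated
 * against rtm_ipv4_policy) into the struct fib_config consumed by
 * fib_table_insert()/fib_table_delete().
 */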
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
			     struct nlmsghdr *nlh, struct fib_config *cfg)
{
	struct nlattr *attr;
	int err, remaining;
	struct rtmsg *rtm;

	err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	memset(cfg, 0, sizeof(*cfg));

	rtm = nlmsg_data(nlh);
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_tos = rtm->rtm_tos;
	cfg->fc_table = rtm->rtm_table;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_scope = rtm->rtm_scope;
	cfg->fc_type = rtm->rtm_type;
	cfg->fc_flags = rtm->rtm_flags;
	cfg->fc_nlflags = nlh->nlmsg_flags;

	cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = net;

	if (cfg->fc_type > RTN_MAX) {
		err = -EINVAL;
		goto errout;
	}

	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
		switch (nla_type(attr)) {
		case RTA_DST:
			cfg->fc_dst = nla_get_be32(attr);
			break;
		case RTA_OIF:
			cfg->fc_oif = nla_get_u32(attr);
			break;
		case RTA_GATEWAY:
			cfg->fc_gw = nla_get_be32(attr);
			break;
		case RTA_PRIORITY:
			cfg->fc_priority = nla_get_u32(attr);
			break;
		case RTA_PREFSRC:
			cfg->fc_prefsrc = nla_get_be32(attr);
			break;
		case RTA_METRICS:
			cfg->fc_mx = nla_data(attr);
			cfg->fc_mx_len = nla_len(attr);
			break;
		case RTA_MULTIPATH:
			cfg->fc_mp = nla_data(attr);
			cfg->fc_mp_len = nla_len(attr);
			break;
		case RTA_FLOW:
			cfg->fc_flow = nla_get_u32(attr);
			break;
		case RTA_TABLE:
			cfg->fc_table = nla_get_u32(attr);
			break;
		}
	}

	return 0;
errout:
	return err;
}
static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_config cfg;
	struct fib_table *tb;
	int err;

	err = rtm_to_fib_config(net, skb, nlh, &cfg);
	if (err < 0)
		goto errout;

	tb = fib_get_table(net, cfg.fc_table);
	if (tb == NULL) {
		err = -ESRCH;
		goto errout;
	}

	err = fib_table_delete(tb, &cfg);
errout:
	return err;
}
static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_config cfg;
	struct fib_table *tb;
	int err;

	err = rtm_to_fib_config(net, skb, nlh, &cfg);
	if (err < 0)
		goto errout;

	tb = fib_new_table(net, cfg.fc_table);
	if (tb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	err = fib_table_insert(tb, &cfg);
errout:
	return err;
}
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct fib_table *tb;
	struct hlist_node *node;
	struct hlist_head *head;
	int dumped = 0;

	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
	    ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
		return ip_rt_dump(skb, cb);

	s_h = cb->args[0];
	s_e = cb->args[1];

	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
		e = 0;
		head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry(tb, node, head, tb_hlist) {
			if (e < s_e)
				goto next;
			if (dumped)
				memset(&cb->args[2], 0, sizeof(cb->args) -
						 2 * sizeof(cb->args[0]));
			if (fib_table_dump(tb, skb, cb) < 0)
				goto out;
			dumped = 1;
next:
			e++;
		}
	}
out:
	cb->args[1] = e;
	cb->args[0] = h;

	return skb->len;
}
/* Prepare and feed an intra-kernel routing request.
 * Really, it should be a netlink message, but netlink may not be
 * configured, so we feed the request directly to the fib engine.
 * This is legal because all such events occur only when netlink is
 * already locked.
 */
static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
{
	struct net *net = dev_net(ifa->ifa_dev->dev);
	struct fib_table *tb;
	struct fib_config cfg = {
		.fc_protocol = RTPROT_KERNEL,
		.fc_type = type,
		.fc_dst = dst,
		.fc_dst_len = dst_len,
		.fc_prefsrc = ifa->ifa_local,
		.fc_oif = ifa->ifa_dev->dev->ifindex,
		.fc_nlflags = NLM_F_CREATE | NLM_F_APPEND,
		.fc_nlinfo = {
			.nl_net = net,
		},
	};

	if (type == RTN_UNICAST)
		tb = fib_new_table(net, RT_TABLE_MAIN);
	else
		tb = fib_new_table(net, RT_TABLE_LOCAL);

	if (tb == NULL)
		return;

	cfg.fc_table = tb->tb_id;

	if (type != RTN_LOCAL)
		cfg.fc_scope = RT_SCOPE_LINK;
	else
		cfg.fc_scope = RT_SCOPE_HOST;

	if (cmd == RTM_NEWROUTE)
		fib_table_insert(tb, &cfg);
	else
		fib_table_delete(tb, &cfg);
}
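/*
 * fib_add_ifaddr() installs the routes implied by a newly added address:
 * a /32 RTN_LOCAL route for the address itself, RTN_BROADCAST routes for
 * the explicit and derived broadcast addresses, and the connected network
 * route for the prefix (RTN_LOCAL on loopback, RTN_UNICAST otherwise).
 */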
void fib_add_ifaddr(struct in_ifaddr *ifa)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct net_device *dev = in_dev->dev;
	struct in_ifaddr *prim = ifa;
	__be32 mask = ifa->ifa_mask;
	__be32 addr = ifa->ifa_local;
	__be32 prefix = ifa->ifa_address & mask;

	if (ifa->ifa_flags & IFA_F_SECONDARY) {
		prim = inet_ifa_byprefix(in_dev, prefix, mask);
		if (prim == NULL) {
			printk(KERN_WARNING "fib_add_ifaddr: bug: prim == NULL\n");
			return;
		}
	}

	fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim);

	if (!(dev->flags & IFF_UP))
		return;

	/* Add the broadcast address, if it is explicitly assigned. */
	if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
		fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);

	if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
	    (prefix != addr || ifa->ifa_prefixlen < 32)) {
		fib_magic(RTM_NEWROUTE,
			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
			  prefix, ifa->ifa_prefixlen, prim);

		/* Add network-specific broadcasts, where it makes sense */
		if (ifa->ifa_prefixlen < 31) {
			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
				  32, prim);
		}
	}
}
static void fib_del_ifaddr(struct in_ifaddr *ifa)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct net_device *dev = in_dev->dev;
	struct in_ifaddr *ifa1;
	struct in_ifaddr *prim = ifa;
	__be32 brd = ifa->ifa_address | ~ifa->ifa_mask;
	__be32 any = ifa->ifa_address & ifa->ifa_mask;
#define LOCAL_OK	1
#define BRD_OK		2
#define BRD0_OK		4
#define BRD1_OK		8
	unsigned ok = 0;

	if (!(ifa->ifa_flags & IFA_F_SECONDARY))
		fib_magic(RTM_DELROUTE,
			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
			  any, ifa->ifa_prefixlen, prim);
	else {
		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
		if (prim == NULL) {
			printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n");
			return;
		}
	}

	/* Deletion is more complicated than add.
	 * We should take care not to delete too much :-)
	 *
	 * Scan the address list to be sure that the addresses are really gone.
	 */
	for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
		if (ifa->ifa_local == ifa1->ifa_local)
			ok |= LOCAL_OK;
		if (ifa->ifa_broadcast == ifa1->ifa_broadcast)
			ok |= BRD_OK;
		if (brd == ifa1->ifa_broadcast)
			ok |= BRD1_OK;
		if (any == ifa1->ifa_broadcast)
			ok |= BRD0_OK;
	}

	if (!(ok & BRD_OK))
		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
	if (!(ok & BRD1_OK))
		fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
	if (!(ok & BRD0_OK))
		fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
	if (!(ok & LOCAL_OK)) {
		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);

		/* Check that this local address finally disappeared. */
		if (inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
			/* And the last, but not the least thing.
			 * We must flush stray FIB entries.
			 *
			 * First of all, we scan the fib_info list searching
			 * for stray nexthop entries, then ignite fib_flush.
			 */
			if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
				fib_flush(dev_net(dev));
		}
	}
#undef LOCAL_OK
#undef BRD_OK
#undef BRD0_OK
#undef BRD1_OK
}
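/*
 * NETLINK_FIB_LOOKUP lets userspace ask for a raw FIB lookup: the request
 * carries a struct fib_result_nl (fl_addr, fl_tos, fl_scope, fl_mark and
 * tb_id_in filled in by the sender); the kernel performs the lookup and
 * unicasts the same structure back with err, tb_id, prefixlen, nh_sel,
 * type and scope filled in. A rough userspace sketch (an illustration,
 * not part of this file):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_FIB_LOOKUP);
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct fib_result_nl frn;
 *	} req = { .nlh = { .nlmsg_len = sizeof(req) } };
 *	req.frn.fl_addr = dst;
 *	req.frn.tb_id_in = RT_TABLE_MAIN;
 *	send(fd, &req, sizeof(req), 0);
 *	recv(fd, &req, sizeof(req), 0);	*&#47; frn.err, frn.type, ... *&#47;
 */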
static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
{
	struct fib_result	res;
	struct flowi4		fl4 = {
		.flowi4_mark = frn->fl_mark,
		.daddr = frn->fl_addr,
		.flowi4_tos = frn->fl_tos,
		.flowi4_scope = frn->fl_scope,
	};

#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r = NULL;
#endif

	frn->err = -ENOENT;
	if (tb) {
		local_bh_disable();

		frn->tb_id = tb->tb_id;
		rcu_read_lock();
		frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);

		if (!frn->err) {
			frn->prefixlen = res.prefixlen;
			frn->nh_sel = res.nh_sel;
			frn->type = res.type;
			frn->scope = res.scope;
		}
		rcu_read_unlock();
		local_bh_enable();
	}
}
static void nl_fib_input(struct sk_buff *skb)
{
	struct net *net;
	struct fib_result_nl *frn;
	struct nlmsghdr *nlh;
	struct fib_table *tb;
	u32 pid;

	net = sock_net(skb->sk);
	nlh = nlmsg_hdr(skb);
	if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len ||
	    nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn)))
		return;

	skb = skb_clone(skb, GFP_KERNEL);
	if (skb == NULL)
		return;
	nlh = nlmsg_hdr(skb);

	frn = (struct fib_result_nl *) NLMSG_DATA(nlh);
	tb = fib_get_table(net, frn->tb_id_in);

	nl_fib_lookup(frn, tb);

	pid = NETLINK_CB(skb).pid;	/* pid of sending process */
	NETLINK_CB(skb).pid = 0;	/* from kernel */
	NETLINK_CB(skb).dst_group = 0;	/* unicast */
	netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
}
static int __net_init nl_fib_lookup_init(struct net *net)
{
	struct sock *sk;
	sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
				   nl_fib_input, NULL, THIS_MODULE);
	if (sk == NULL)
		return -EAFNOSUPPORT;
	net->ipv4.fibnl = sk;
	return 0;
}

static void nl_fib_lookup_exit(struct net *net)
{
	netlink_kernel_release(net->ipv4.fibnl);
	net->ipv4.fibnl = NULL;
}
static void fib_disable_ip(struct net_device *dev, int force, int delay)
{
	if (fib_sync_down_dev(dev, force))
		fib_flush(dev_net(dev));
	rt_cache_flush(dev_net(dev), delay);
	arp_ifdown(dev);
}
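/*
 * Two notifiers keep the FIB in sync with interface state: the inetaddr
 * notifier below reacts to addresses being added or removed, and the
 * netdev notifier further down reacts to devices coming up, going down
 * or being unregistered (fib_disable_ip() tears down the affected routes).
 */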
static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;

	switch (event) {
	case NETDEV_UP:
		fib_add_ifaddr(ifa);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		fib_sync_up(dev);
#endif
		fib_update_nh_saddrs(dev);
		rt_cache_flush(dev_net(dev), -1);
		break;
	case NETDEV_DOWN:
		fib_del_ifaddr(ifa);
		fib_update_nh_saddrs(dev);
		if (ifa->ifa_dev->ifa_list == NULL) {
			/* Last address was deleted from this interface.
			 * Disable IP.
			 */
			fib_disable_ip(dev, 1, 0);
		} else {
			rt_cache_flush(dev_net(dev), -1);
		}
		break;
	}
	return NOTIFY_DONE;
}
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct in_device *in_dev = __in_dev_get_rtnl(dev);

	if (event == NETDEV_UNREGISTER) {
		fib_disable_ip(dev, 2, -1);
		return NOTIFY_DONE;
	}

	if (!in_dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		for_ifa(in_dev) {
			fib_add_ifaddr(ifa);
		} endfor_ifa(in_dev);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		fib_sync_up(dev);
#endif
		rt_cache_flush(dev_net(dev), -1);
		break;
	case NETDEV_DOWN:
		fib_disable_ip(dev, 0, 0);
		break;
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGE:
		rt_cache_flush(dev_net(dev), 0);
		break;
	case NETDEV_UNREGISTER_BATCH:
		/* The batch unregister is only called on the first
		 * device in the list of devices being unregistered.
		 * Therefore we should not pass dev_net(dev) in here.
		 */
		rt_cache_flush_batch(NULL);
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block fib_inetaddr_notifier = {
	.notifier_call = fib_inetaddr_event,
};

static struct notifier_block fib_netdev_notifier = {
	.notifier_call = fib_netdev_event,
};
static int __net_init ip_fib_net_init(struct net *net)
{
	int err;
	size_t size = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ;

	/* Avoid false sharing : Use at least a full cache line */
	size = max_t(size_t, size, L1_CACHE_BYTES);

	net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
	if (net->ipv4.fib_table_hash == NULL)
		return -ENOMEM;

	err = fib4_rules_init(net);
	if (err < 0)
		goto fail;
	return 0;

fail:
	kfree(net->ipv4.fib_table_hash);
	return err;
}
static void ip_fib_net_exit(struct net *net)
{
	unsigned int i;

#ifdef CONFIG_IP_MULTIPLE_TABLES
	fib4_rules_exit(net);
#endif

	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
		struct fib_table *tb;
		struct hlist_head *head;
		struct hlist_node *node, *tmp;

		head = &net->ipv4.fib_table_hash[i];
		hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
			hlist_del(node);
			fib_table_flush(tb);
			fib_free_table(tb);
		}
	}
	kfree(net->ipv4.fib_table_hash);
}
static int __net_init fib_net_init(struct net *net)
{
	int error;

	error = ip_fib_net_init(net);
	if (error < 0)
		goto out;
	error = nl_fib_lookup_init(net);
	if (error < 0)
		goto out_nlfl;
	error = fib_proc_init(net);
	if (error < 0)
		goto out_proc;
out:
	return error;

out_proc:
	nl_fib_lookup_exit(net);
out_nlfl:
	ip_fib_net_exit(net);
	goto out;
}
static void __net_exit fib_net_exit(struct net *net)
{
	fib_proc_exit(net);
	nl_fib_lookup_exit(net);
	ip_fib_net_exit(net);
}

static struct pernet_operations fib_net_ops = {
	.init = fib_net_init,
	.exit = fib_net_exit,
};
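/*
 * ip_fib_init() wires everything up at boot: the rtnetlink handlers for
 * RTM_NEWROUTE/RTM_DELROUTE/RTM_GETROUTE, the per-netns init/exit hooks
 * in fib_net_ops, and the two notifier blocks above, then initializes the
 * fib_trie backend.
 */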
void __init ip_fib_init(void)
{
	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL);
	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL);
	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib);

	register_pernet_subsys(&fib_net_ops);
	register_netdevice_notifier(&fib_netdev_notifier);
	register_inetaddr_notifier(&fib_inetaddr_notifier);

	fib_trie_init();
}