net/ipv6/route.c
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
14 /* Changes:
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
27 #include <linux/capability.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/times.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/route.h>
35 #include <linux/netdevice.h>
36 #include <linux/in6.h>
37 #include <linux/mroute6.h>
38 #include <linux/init.h>
39 #include <linux/if_arp.h>
40 #include <linux/proc_fs.h>
41 #include <linux/seq_file.h>
42 #include <linux/nsproxy.h>
43 #include <linux/slab.h>
44 #include <net/net_namespace.h>
45 #include <net/snmp.h>
46 #include <net/ipv6.h>
47 #include <net/ip6_fib.h>
48 #include <net/ip6_route.h>
49 #include <net/ndisc.h>
50 #include <net/addrconf.h>
51 #include <net/tcp.h>
52 #include <linux/rtnetlink.h>
53 #include <net/dst.h>
54 #include <net/xfrm.h>
55 #include <net/netevent.h>
56 #include <net/netlink.h>
58 #include <asm/uaccess.h>
60 #ifdef CONFIG_SYSCTL
61 #include <linux/sysctl.h>
62 #endif
64 /* Set to 3 to get tracing. */
65 #define RT6_DEBUG 2
67 #if RT6_DEBUG >= 3
68 #define RDBG(x) printk x
69 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
70 #else
71 #define RDBG(x)
72 #define RT6_TRACE(x...) do { ; } while (0)
73 #endif
75 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
76 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
77 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
78 static unsigned int ip6_default_mtu(const struct dst_entry *dst);
79 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
80 static void ip6_dst_destroy(struct dst_entry *);
81 static void ip6_dst_ifdown(struct dst_entry *,
82 struct net_device *dev, int how);
83 static int ip6_dst_gc(struct dst_ops *ops);
85 static int ip6_pkt_discard(struct sk_buff *skb);
86 static int ip6_pkt_discard_out(struct sk_buff *skb);
87 static void ip6_link_failure(struct sk_buff *skb);
88 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
90 #ifdef CONFIG_IPV6_ROUTE_INFO
91 static struct rt6_info *rt6_add_route_info(struct net *net,
92 const struct in6_addr *prefix, int prefixlen,
93 const struct in6_addr *gwaddr, int ifindex,
94 unsigned pref);
95 static struct rt6_info *rt6_get_route_info(struct net *net,
96 const struct in6_addr *prefix, int prefixlen,
97 const struct in6_addr *gwaddr, int ifindex);
98 #endif
100 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
102 struct rt6_info *rt = (struct rt6_info *) dst;
103 struct inet_peer *peer;
104 u32 *p = NULL;
106 if (!rt->rt6i_peer)
107 rt6_bind_peer(rt, 1);
109 peer = rt->rt6i_peer;
110 if (peer) {
111 u32 *old_p = __DST_METRICS_PTR(old);
112 unsigned long prev, new;
114 p = peer->metrics;
115 if (inet_metrics_new(peer))
116 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
118 new = (unsigned long) p;
119 prev = cmpxchg(&dst->_metrics, old, new);
121 if (prev != old) {
122 p = __DST_METRICS_PTR(prev);
123 if (prev & DST_METRICS_READ_ONLY)
124 p = NULL;
127 return p;
130 static struct dst_ops ip6_dst_ops_template = {
131 .family = AF_INET6,
132 .protocol = cpu_to_be16(ETH_P_IPV6),
133 .gc = ip6_dst_gc,
134 .gc_thresh = 1024,
135 .check = ip6_dst_check,
136 .default_advmss = ip6_default_advmss,
137 .default_mtu = ip6_default_mtu,
138 .cow_metrics = ipv6_cow_metrics,
139 .destroy = ip6_dst_destroy,
140 .ifdown = ip6_dst_ifdown,
141 .negative_advice = ip6_negative_advice,
142 .link_failure = ip6_link_failure,
143 .update_pmtu = ip6_rt_update_pmtu,
144 .local_out = __ip6_local_out,
147 static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
149 return 0;
152 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
156 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
157 unsigned long old)
159 return NULL;
162 static struct dst_ops ip6_dst_blackhole_ops = {
163 .family = AF_INET6,
164 .protocol = cpu_to_be16(ETH_P_IPV6),
165 .destroy = ip6_dst_destroy,
166 .check = ip6_dst_check,
167 .default_mtu = ip6_blackhole_default_mtu,
168 .default_advmss = ip6_default_advmss,
169 .update_pmtu = ip6_rt_blackhole_update_pmtu,
170 .cow_metrics = ip6_rt_blackhole_cow_metrics,
173 static const u32 ip6_template_metrics[RTAX_MAX] = {
174 [RTAX_HOPLIMIT - 1] = 255,
177 static struct rt6_info ip6_null_entry_template = {
178 .dst = {
179 .__refcnt = ATOMIC_INIT(1),
180 .__use = 1,
181 .obsolete = -1,
182 .error = -ENETUNREACH,
183 .input = ip6_pkt_discard,
184 .output = ip6_pkt_discard_out,
186 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
187 .rt6i_protocol = RTPROT_KERNEL,
188 .rt6i_metric = ~(u32) 0,
189 .rt6i_ref = ATOMIC_INIT(1),
192 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
194 static int ip6_pkt_prohibit(struct sk_buff *skb);
195 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
197 static struct rt6_info ip6_prohibit_entry_template = {
198 .dst = {
199 .__refcnt = ATOMIC_INIT(1),
200 .__use = 1,
201 .obsolete = -1,
202 .error = -EACCES,
203 .input = ip6_pkt_prohibit,
204 .output = ip6_pkt_prohibit_out,
206 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
207 .rt6i_protocol = RTPROT_KERNEL,
208 .rt6i_metric = ~(u32) 0,
209 .rt6i_ref = ATOMIC_INIT(1),
212 static struct rt6_info ip6_blk_hole_entry_template = {
213 .dst = {
214 .__refcnt = ATOMIC_INIT(1),
215 .__use = 1,
216 .obsolete = -1,
217 .error = -EINVAL,
218 .input = dst_discard,
219 .output = dst_discard,
221 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
222 .rt6i_protocol = RTPROT_KERNEL,
223 .rt6i_metric = ~(u32) 0,
224 .rt6i_ref = ATOMIC_INIT(1),
227 #endif
229 /* allocate dst with ip6_dst_ops */
230 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
231 struct net_device *dev,
232 int flags)
234 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
236 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
238 return rt;
241 static void ip6_dst_destroy(struct dst_entry *dst)
243 struct rt6_info *rt = (struct rt6_info *)dst;
244 struct inet6_dev *idev = rt->rt6i_idev;
245 struct inet_peer *peer = rt->rt6i_peer;
247 if (idev != NULL) {
248 rt->rt6i_idev = NULL;
249 in6_dev_put(idev);
251 if (peer) {
252 rt->rt6i_peer = NULL;
253 inet_putpeer(peer);
257 static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
259 static u32 rt6_peer_genid(void)
261 return atomic_read(&__rt6_peer_genid);
264 void rt6_bind_peer(struct rt6_info *rt, int create)
266 struct inet_peer *peer;
268 peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
269 if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
270 inet_putpeer(peer);
271 else
272 rt->rt6i_peer_genid = rt6_peer_genid();
275 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
276 int how)
278 struct rt6_info *rt = (struct rt6_info *)dst;
279 struct inet6_dev *idev = rt->rt6i_idev;
280 struct net_device *loopback_dev =
281 dev_net(dev)->loopback_dev;
283 if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
284 struct inet6_dev *loopback_idev =
285 in6_dev_get(loopback_dev);
286 if (loopback_idev != NULL) {
287 rt->rt6i_idev = loopback_idev;
288 in6_dev_put(idev);
293 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
295 return (rt->rt6i_flags & RTF_EXPIRES) &&
296 time_after(jiffies, rt->rt6i_expires);
299 static inline int rt6_need_strict(const struct in6_addr *daddr)
301 return ipv6_addr_type(daddr) &
302 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
306 * Route lookup. The relevant table->tb6_lock is assumed to be held.
309 static inline struct rt6_info *rt6_device_match(struct net *net,
310 struct rt6_info *rt,
311 const struct in6_addr *saddr,
312 int oif,
313 int flags)
315 struct rt6_info *local = NULL;
316 struct rt6_info *sprt;
318 if (!oif && ipv6_addr_any(saddr))
319 goto out;
321 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
322 struct net_device *dev = sprt->rt6i_dev;
324 if (oif) {
325 if (dev->ifindex == oif)
326 return sprt;
327 if (dev->flags & IFF_LOOPBACK) {
328 if (sprt->rt6i_idev == NULL ||
329 sprt->rt6i_idev->dev->ifindex != oif) {
330 if (flags & RT6_LOOKUP_F_IFACE && oif)
331 continue;
332 if (local && (!oif ||
333 local->rt6i_idev->dev->ifindex == oif))
334 continue;
336 local = sprt;
338 } else {
339 if (ipv6_chk_addr(net, saddr, dev,
340 flags & RT6_LOOKUP_F_IFACE))
341 return sprt;
345 if (oif) {
346 if (local)
347 return local;
349 if (flags & RT6_LOOKUP_F_IFACE)
350 return net->ipv6.ip6_null_entry;
352 out:
353 return rt;
356 #ifdef CONFIG_IPV6_ROUTER_PREF
357 static void rt6_probe(struct rt6_info *rt)
359 struct neighbour *neigh;
361 * Okay, this does not seem to be appropriate
362 * for now, however, we need to check if it
363 * is really so; aka Router Reachability Probing.
365 * Router Reachability Probe MUST be rate-limited
366 * to no more than one per minute.
368 rcu_read_lock();
369 neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
370 if (!neigh || (neigh->nud_state & NUD_VALID))
371 goto out;
372 read_lock_bh(&neigh->lock);
373 if (!(neigh->nud_state & NUD_VALID) &&
374 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
375 struct in6_addr mcaddr;
376 struct in6_addr *target;
378 neigh->updated = jiffies;
379 read_unlock_bh(&neigh->lock);
381 target = (struct in6_addr *)&neigh->primary_key;
382 addrconf_addr_solict_mult(target, &mcaddr);
383 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
384 } else {
385 read_unlock_bh(&neigh->lock);
387 out:
388 rcu_read_unlock();
390 #else
391 static inline void rt6_probe(struct rt6_info *rt)
394 #endif
397 * Default Router Selection (RFC 2461 6.3.6)
399 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
401 struct net_device *dev = rt->rt6i_dev;
402 if (!oif || dev->ifindex == oif)
403 return 2;
404 if ((dev->flags & IFF_LOOPBACK) &&
405 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
406 return 1;
407 return 0;
410 static inline int rt6_check_neigh(struct rt6_info *rt)
412 struct neighbour *neigh;
413 int m;
415 rcu_read_lock();
416 neigh = dst_get_neighbour(&rt->dst);
417 if (rt->rt6i_flags & RTF_NONEXTHOP ||
418 !(rt->rt6i_flags & RTF_GATEWAY))
419 m = 1;
420 else if (neigh) {
421 read_lock_bh(&neigh->lock);
422 if (neigh->nud_state & NUD_VALID)
423 m = 2;
424 #ifdef CONFIG_IPV6_ROUTER_PREF
425 else if (neigh->nud_state & NUD_FAILED)
426 m = 0;
427 #endif
428 else
429 m = 1;
430 read_unlock_bh(&neigh->lock);
431 } else
432 m = 0;
433 rcu_read_unlock();
434 return m;
437 static int rt6_score_route(struct rt6_info *rt, int oif,
438 int strict)
440 int m, n;
442 m = rt6_check_dev(rt, oif);
443 if (!m && (strict & RT6_LOOKUP_F_IFACE))
444 return -1;
445 #ifdef CONFIG_IPV6_ROUTER_PREF
446 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
447 #endif
448 n = rt6_check_neigh(rt);
449 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
450 return -1;
451 return m;
454 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
455 int *mpri, struct rt6_info *match)
457 int m;
459 if (rt6_check_expired(rt))
460 goto out;
462 m = rt6_score_route(rt, oif, strict);
463 if (m < 0)
464 goto out;
466 if (m > *mpri) {
467 if (strict & RT6_LOOKUP_F_REACHABLE)
468 rt6_probe(match);
469 *mpri = m;
470 match = rt;
471 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
472 rt6_probe(rt);
475 out:
476 return match;
479 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
480 struct rt6_info *rr_head,
481 u32 metric, int oif, int strict)
483 struct rt6_info *rt, *match;
484 int mpri = -1;
486 match = NULL;
487 for (rt = rr_head; rt && rt->rt6i_metric == metric;
488 rt = rt->dst.rt6_next)
489 match = find_match(rt, oif, strict, &mpri, match);
490 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
491 rt = rt->dst.rt6_next)
492 match = find_match(rt, oif, strict, &mpri, match);
494 return match;
497 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
499 struct rt6_info *match, *rt0;
500 struct net *net;
502 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
503 __func__, fn->leaf, oif);
505 rt0 = fn->rr_ptr;
506 if (!rt0)
507 fn->rr_ptr = rt0 = fn->leaf;
509 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
511 if (!match &&
512 (strict & RT6_LOOKUP_F_REACHABLE)) {
513 struct rt6_info *next = rt0->dst.rt6_next;
515 /* no entries matched; do round-robin */
516 if (!next || next->rt6i_metric != rt0->rt6i_metric)
517 next = fn->leaf;
519 if (next != rt0)
520 fn->rr_ptr = next;
523 RT6_TRACE("%s() => %p\n",
524 __func__, match);
526 net = dev_net(rt0->rt6i_dev);
527 return match ? match : net->ipv6.ip6_null_entry;
530 #ifdef CONFIG_IPV6_ROUTE_INFO
531 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
532 const struct in6_addr *gwaddr)
534 struct net *net = dev_net(dev);
535 struct route_info *rinfo = (struct route_info *) opt;
536 struct in6_addr prefix_buf, *prefix;
537 unsigned int pref;
538 unsigned long lifetime;
539 struct rt6_info *rt;
541 if (len < sizeof(struct route_info)) {
542 return -EINVAL;
545 /* Sanity check for prefix_len and length */
546 if (rinfo->length > 3) {
547 return -EINVAL;
548 } else if (rinfo->prefix_len > 128) {
549 return -EINVAL;
550 } else if (rinfo->prefix_len > 64) {
551 if (rinfo->length < 2) {
552 return -EINVAL;
554 } else if (rinfo->prefix_len > 0) {
555 if (rinfo->length < 1) {
556 return -EINVAL;
560 pref = rinfo->route_pref;
561 if (pref == ICMPV6_ROUTER_PREF_INVALID)
562 return -EINVAL;
564 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
566 if (rinfo->length == 3)
567 prefix = (struct in6_addr *)rinfo->prefix;
568 else {
569 /* this function is safe */
570 ipv6_addr_prefix(&prefix_buf,
571 (struct in6_addr *)rinfo->prefix,
572 rinfo->prefix_len);
573 prefix = &prefix_buf;
576 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
577 dev->ifindex);
579 if (rt && !lifetime) {
580 ip6_del_rt(rt);
581 rt = NULL;
584 if (!rt && lifetime)
585 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
586 pref);
587 else if (rt)
588 rt->rt6i_flags = RTF_ROUTEINFO |
589 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
591 if (rt) {
592 if (!addrconf_finite_timeout(lifetime)) {
593 rt->rt6i_flags &= ~RTF_EXPIRES;
594 } else {
595 rt->rt6i_expires = jiffies + HZ * lifetime;
596 rt->rt6i_flags |= RTF_EXPIRES;
598 dst_release(&rt->dst);
600 return 0;
602 #endif
604 #define BACKTRACK(__net, saddr) \
605 do { \
606 if (rt == __net->ipv6.ip6_null_entry) { \
607 struct fib6_node *pn; \
608 while (1) { \
609 if (fn->fn_flags & RTN_TL_ROOT) \
610 goto out; \
611 pn = fn->parent; \
612 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
613 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
614 else \
615 fn = pn; \
616 if (fn->fn_flags & RTN_RTINFO) \
617 goto restart; \
620 } while(0)
622 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
623 struct fib6_table *table,
624 struct flowi6 *fl6, int flags)
626 struct fib6_node *fn;
627 struct rt6_info *rt;
629 read_lock_bh(&table->tb6_lock);
630 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
631 restart:
632 rt = fn->leaf;
633 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
634 BACKTRACK(net, &fl6->saddr);
635 out:
636 dst_use(&rt->dst, jiffies);
637 read_unlock_bh(&table->tb6_lock);
638 return rt;
642 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
643 const struct in6_addr *saddr, int oif, int strict)
645 struct flowi6 fl6 = {
646 .flowi6_oif = oif,
647 .daddr = *daddr,
649 struct dst_entry *dst;
650 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
652 if (saddr) {
653 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
654 flags |= RT6_LOOKUP_F_HAS_SADDR;
657 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
658 if (dst->error == 0)
659 return (struct rt6_info *) dst;
661 dst_release(dst);
663 return NULL;
666 EXPORT_SYMBOL(rt6_lookup);
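/*
 * A minimal illustrative sketch (not part of this file) of how a caller
 * might use rt6_lookup(), mirroring the pattern of rt6_do_pmtu_disc()
 * below: look the route up, use it, then drop the reference with
 * dst_release().  The surrounding variables (net, daddr, dev) are assumed
 * to exist in the caller.
 *
 *	struct rt6_info *rt;
 *
 *	rt = rt6_lookup(net, daddr, NULL, dev->ifindex, 0);
 *	if (rt == NULL)
 *		return;
 *	... inspect rt->rt6i_flags, dst_mtu(&rt->dst), etc. ...
 *	dst_release(&rt->dst);
 */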
668 /* ip6_ins_rt is called with table->tb6_lock NOT held (free).
669 It takes a new route entry; if the addition fails for any reason,
670 the route is freed. In any case, if the caller does not hold a
671 reference to it, it may be destroyed.
674 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
676 int err;
677 struct fib6_table *table;
679 table = rt->rt6i_table;
680 write_lock_bh(&table->tb6_lock);
681 err = fib6_add(&table->tb6_root, rt, info);
682 write_unlock_bh(&table->tb6_lock);
684 return err;
687 int ip6_ins_rt(struct rt6_info *rt)
689 struct nl_info info = {
690 .nl_net = dev_net(rt->rt6i_dev),
692 return __ip6_ins_rt(rt, &info);
695 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_addr *daddr,
696 const struct in6_addr *saddr)
698 struct rt6_info *rt;
701 * Clone the route.
704 rt = ip6_rt_copy(ort);
706 if (rt) {
707 struct neighbour *neigh;
708 int attempts = !in_softirq();
710 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
711 if (rt->rt6i_dst.plen != 128 &&
712 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
713 rt->rt6i_flags |= RTF_ANYCAST;
714 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
717 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
718 rt->rt6i_dst.plen = 128;
719 rt->rt6i_flags |= RTF_CACHE;
720 rt->dst.flags |= DST_HOST;
722 #ifdef CONFIG_IPV6_SUBTREES
723 if (rt->rt6i_src.plen && saddr) {
724 ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
725 rt->rt6i_src.plen = 128;
727 #endif
729 retry:
730 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
731 if (IS_ERR(neigh)) {
732 struct net *net = dev_net(rt->rt6i_dev);
733 int saved_rt_min_interval =
734 net->ipv6.sysctl.ip6_rt_gc_min_interval;
735 int saved_rt_elasticity =
736 net->ipv6.sysctl.ip6_rt_gc_elasticity;
738 if (attempts-- > 0) {
739 net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
740 net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
742 ip6_dst_gc(&net->ipv6.ip6_dst_ops);
744 net->ipv6.sysctl.ip6_rt_gc_elasticity =
745 saved_rt_elasticity;
746 net->ipv6.sysctl.ip6_rt_gc_min_interval =
747 saved_rt_min_interval;
748 goto retry;
751 if (net_ratelimit())
752 printk(KERN_WARNING
753 "ipv6: Neighbour table overflow.\n");
754 dst_free(&rt->dst);
755 return NULL;
757 dst_set_neighbour(&rt->dst, neigh);
760 return rt;
763 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, const struct in6_addr *daddr)
765 struct rt6_info *rt = ip6_rt_copy(ort);
766 if (rt) {
767 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
768 rt->rt6i_dst.plen = 128;
769 rt->rt6i_flags |= RTF_CACHE;
770 rt->dst.flags |= DST_HOST;
771 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
773 return rt;
776 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
777 struct flowi6 *fl6, int flags)
779 struct fib6_node *fn;
780 struct rt6_info *rt, *nrt;
781 int strict = 0;
782 int attempts = 3;
783 int err;
784 int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
786 strict |= flags & RT6_LOOKUP_F_IFACE;
788 relookup:
789 read_lock_bh(&table->tb6_lock);
791 restart_2:
792 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
794 restart:
795 rt = rt6_select(fn, oif, strict | reachable);
797 BACKTRACK(net, &fl6->saddr);
798 if (rt == net->ipv6.ip6_null_entry ||
799 rt->rt6i_flags & RTF_CACHE)
800 goto out;
802 dst_hold(&rt->dst);
803 read_unlock_bh(&table->tb6_lock);
805 if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
806 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
807 else if (!(rt->dst.flags & DST_HOST))
808 nrt = rt6_alloc_clone(rt, &fl6->daddr);
809 else
810 goto out2;
812 dst_release(&rt->dst);
813 rt = nrt ? : net->ipv6.ip6_null_entry;
815 dst_hold(&rt->dst);
816 if (nrt) {
817 err = ip6_ins_rt(nrt);
818 if (!err)
819 goto out2;
822 if (--attempts <= 0)
823 goto out2;
826 * Race condition! In the gap when table->tb6_lock was
827 * released, someone could have inserted this route. Relookup.
829 dst_release(&rt->dst);
830 goto relookup;
832 out:
833 if (reachable) {
834 reachable = 0;
835 goto restart_2;
837 dst_hold(&rt->dst);
838 read_unlock_bh(&table->tb6_lock);
839 out2:
840 rt->dst.lastuse = jiffies;
841 rt->dst.__use++;
843 return rt;
846 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
847 struct flowi6 *fl6, int flags)
849 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
852 void ip6_route_input(struct sk_buff *skb)
854 const struct ipv6hdr *iph = ipv6_hdr(skb);
855 struct net *net = dev_net(skb->dev);
856 int flags = RT6_LOOKUP_F_HAS_SADDR;
857 struct flowi6 fl6 = {
858 .flowi6_iif = skb->dev->ifindex,
859 .daddr = iph->daddr,
860 .saddr = iph->saddr,
861 .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
862 .flowi6_mark = skb->mark,
863 .flowi6_proto = iph->nexthdr,
866 if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
867 flags |= RT6_LOOKUP_F_IFACE;
869 skb_dst_set(skb, fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_input));
872 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
873 struct flowi6 *fl6, int flags)
875 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
878 struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
879 struct flowi6 *fl6)
881 int flags = 0;
883 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
884 flags |= RT6_LOOKUP_F_IFACE;
886 if (!ipv6_addr_any(&fl6->saddr))
887 flags |= RT6_LOOKUP_F_HAS_SADDR;
888 else if (sk)
889 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
891 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
894 EXPORT_SYMBOL(ip6_route_output);
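/*
 * A minimal illustrative sketch (assumptions noted) of an output-path
 * lookup via ip6_route_output().  The flowi6 members used here (daddr,
 * saddr, flowi6_oif) are the same ones filled in by rt6_lookup() and
 * ip6_route_input() above; the caller checks dst->error and releases the
 * reference when done.  net, sk, oif, daddr and saddr are assumed to be
 * provided by the caller.
 *
 *	struct flowi6 fl6 = {
 *		.flowi6_oif = oif,
 *		.daddr = *daddr,
 *		.saddr = *saddr,
 *	};
 *	struct dst_entry *dst;
 *
 *	dst = ip6_route_output(net, sk, &fl6);
 *	if (dst->error) {
 *		dst_release(dst);
 *		return -EHOSTUNREACH;
 *	}
 *	... transmit via dst, then dst_release(dst) ...
 */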
896 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
898 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
899 struct dst_entry *new = NULL;
901 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
902 if (rt) {
903 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
905 new = &rt->dst;
907 new->__use = 1;
908 new->input = dst_discard;
909 new->output = dst_discard;
911 dst_copy_metrics(new, &ort->dst);
912 rt->rt6i_idev = ort->rt6i_idev;
913 if (rt->rt6i_idev)
914 in6_dev_hold(rt->rt6i_idev);
915 rt->rt6i_expires = 0;
917 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
918 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
919 rt->rt6i_metric = 0;
921 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
922 #ifdef CONFIG_IPV6_SUBTREES
923 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
924 #endif
926 dst_free(new);
929 dst_release(dst_orig);
930 return new ? new : ERR_PTR(-ENOMEM);
934 * Destination cache support functions
937 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
939 struct rt6_info *rt;
941 rt = (struct rt6_info *) dst;
943 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
944 if (rt->rt6i_peer_genid != rt6_peer_genid()) {
945 if (!rt->rt6i_peer)
946 rt6_bind_peer(rt, 0);
947 rt->rt6i_peer_genid = rt6_peer_genid();
949 return dst;
951 return NULL;
954 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
956 struct rt6_info *rt = (struct rt6_info *) dst;
958 if (rt) {
959 if (rt->rt6i_flags & RTF_CACHE) {
960 if (rt6_check_expired(rt)) {
961 ip6_del_rt(rt);
962 dst = NULL;
964 } else {
965 dst_release(dst);
966 dst = NULL;
969 return dst;
972 static void ip6_link_failure(struct sk_buff *skb)
974 struct rt6_info *rt;
976 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
978 rt = (struct rt6_info *) skb_dst(skb);
979 if (rt) {
980 if (rt->rt6i_flags&RTF_CACHE) {
981 dst_set_expires(&rt->dst, 0);
982 rt->rt6i_flags |= RTF_EXPIRES;
983 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
984 rt->rt6i_node->fn_sernum = -1;
988 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
990 struct rt6_info *rt6 = (struct rt6_info*)dst;
992 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
993 rt6->rt6i_flags |= RTF_MODIFIED;
994 if (mtu < IPV6_MIN_MTU) {
995 u32 features = dst_metric(dst, RTAX_FEATURES);
996 mtu = IPV6_MIN_MTU;
997 features |= RTAX_FEATURE_ALLFRAG;
998 dst_metric_set(dst, RTAX_FEATURES, features);
1000 dst_metric_set(dst, RTAX_MTU, mtu);
1004 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1006 struct net_device *dev = dst->dev;
1007 unsigned int mtu = dst_mtu(dst);
1008 struct net *net = dev_net(dev);
1010 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1012 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1013 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1016 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1017 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1018 * IPV6_MAXPLEN is also valid and means: "any MSS,
1019 * rely only on pmtu discovery"
1021 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1022 mtu = IPV6_MAXPLEN;
1023 return mtu;
1026 static unsigned int ip6_default_mtu(const struct dst_entry *dst)
1028 unsigned int mtu = IPV6_MIN_MTU;
1029 struct inet6_dev *idev;
1031 rcu_read_lock();
1032 idev = __in6_dev_get(dst->dev);
1033 if (idev)
1034 mtu = idev->cnf.mtu6;
1035 rcu_read_unlock();
1037 return mtu;
1040 static struct dst_entry *icmp6_dst_gc_list;
1041 static DEFINE_SPINLOCK(icmp6_dst_lock);
1043 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1044 struct neighbour *neigh,
1045 const struct in6_addr *addr)
1047 struct rt6_info *rt;
1048 struct inet6_dev *idev = in6_dev_get(dev);
1049 struct net *net = dev_net(dev);
1051 if (unlikely(idev == NULL))
1052 return NULL;
1054 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
1055 if (unlikely(rt == NULL)) {
1056 in6_dev_put(idev);
1057 goto out;
1060 if (neigh)
1061 neigh_hold(neigh);
1062 else {
1063 neigh = ndisc_get_neigh(dev, addr);
1064 if (IS_ERR(neigh))
1065 neigh = NULL;
1068 rt->rt6i_idev = idev;
1069 dst_set_neighbour(&rt->dst, neigh);
1070 atomic_set(&rt->dst.__refcnt, 1);
1071 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
1072 rt->dst.output = ip6_output;
1074 spin_lock_bh(&icmp6_dst_lock);
1075 rt->dst.next = icmp6_dst_gc_list;
1076 icmp6_dst_gc_list = &rt->dst;
1077 spin_unlock_bh(&icmp6_dst_lock);
1079 fib6_force_start_gc(net);
1081 out:
1082 return &rt->dst;
1085 int icmp6_dst_gc(void)
1087 struct dst_entry *dst, **pprev;
1088 int more = 0;
1090 spin_lock_bh(&icmp6_dst_lock);
1091 pprev = &icmp6_dst_gc_list;
1093 while ((dst = *pprev) != NULL) {
1094 if (!atomic_read(&dst->__refcnt)) {
1095 *pprev = dst->next;
1096 dst_free(dst);
1097 } else {
1098 pprev = &dst->next;
1099 ++more;
1103 spin_unlock_bh(&icmp6_dst_lock);
1105 return more;
1108 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1109 void *arg)
1111 struct dst_entry *dst, **pprev;
1113 spin_lock_bh(&icmp6_dst_lock);
1114 pprev = &icmp6_dst_gc_list;
1115 while ((dst = *pprev) != NULL) {
1116 struct rt6_info *rt = (struct rt6_info *) dst;
1117 if (func(rt, arg)) {
1118 *pprev = dst->next;
1119 dst_free(dst);
1120 } else {
1121 pprev = &dst->next;
1124 spin_unlock_bh(&icmp6_dst_lock);
1127 static int ip6_dst_gc(struct dst_ops *ops)
1129 unsigned long now = jiffies;
1130 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1131 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1132 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1133 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1134 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1135 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1136 int entries;
1138 entries = dst_entries_get_fast(ops);
1139 if (time_after(rt_last_gc + rt_min_interval, now) &&
1140 entries <= rt_max_size)
1141 goto out;
1143 net->ipv6.ip6_rt_gc_expire++;
1144 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1145 net->ipv6.ip6_rt_last_gc = now;
1146 entries = dst_entries_get_slow(ops);
1147 if (entries < ops->gc_thresh)
1148 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1149 out:
1150 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1151 return entries > rt_max_size;
1154 /* Clean the host part of a prefix. Not necessary in a radix tree,
1155 but results in cleaner routing tables.
1157 Remove it only once everything works!
1160 int ip6_dst_hoplimit(struct dst_entry *dst)
1162 int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
1163 if (hoplimit == 0) {
1164 struct net_device *dev = dst->dev;
1165 struct inet6_dev *idev;
1167 rcu_read_lock();
1168 idev = __in6_dev_get(dev);
1169 if (idev)
1170 hoplimit = idev->cnf.hop_limit;
1171 else
1172 hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
1173 rcu_read_unlock();
1175 return hoplimit;
1177 EXPORT_SYMBOL(ip6_dst_hoplimit);
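/*
 * Illustrative sketch only: callers typically fall back to
 * ip6_dst_hoplimit() when no per-socket hop limit is configured.  Here np
 * is assumed to be the socket's struct ipv6_pinfo (an assumption of this
 * sketch, not something defined in this file):
 *
 *	hlimit = np->hop_limit;
 *	if (hlimit < 0)
 *		hlimit = ip6_dst_hoplimit(dst);
 */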
1183 int ip6_route_add(struct fib6_config *cfg)
1185 int err;
1186 struct net *net = cfg->fc_nlinfo.nl_net;
1187 struct rt6_info *rt = NULL;
1188 struct net_device *dev = NULL;
1189 struct inet6_dev *idev = NULL;
1190 struct fib6_table *table;
1191 int addr_type;
1193 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1194 return -EINVAL;
1195 #ifndef CONFIG_IPV6_SUBTREES
1196 if (cfg->fc_src_len)
1197 return -EINVAL;
1198 #endif
1199 if (cfg->fc_ifindex) {
1200 err = -ENODEV;
1201 dev = dev_get_by_index(net, cfg->fc_ifindex);
1202 if (!dev)
1203 goto out;
1204 idev = in6_dev_get(dev);
1205 if (!idev)
1206 goto out;
1209 if (cfg->fc_metric == 0)
1210 cfg->fc_metric = IP6_RT_PRIO_USER;
1212 table = fib6_new_table(net, cfg->fc_table);
1213 if (table == NULL) {
1214 err = -ENOBUFS;
1215 goto out;
1218 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
1220 if (rt == NULL) {
1221 err = -ENOMEM;
1222 goto out;
1225 rt->dst.obsolete = -1;
1226 rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
1227 jiffies + clock_t_to_jiffies(cfg->fc_expires) :
1230 if (cfg->fc_protocol == RTPROT_UNSPEC)
1231 cfg->fc_protocol = RTPROT_BOOT;
1232 rt->rt6i_protocol = cfg->fc_protocol;
1234 addr_type = ipv6_addr_type(&cfg->fc_dst);
1236 if (addr_type & IPV6_ADDR_MULTICAST)
1237 rt->dst.input = ip6_mc_input;
1238 else if (cfg->fc_flags & RTF_LOCAL)
1239 rt->dst.input = ip6_input;
1240 else
1241 rt->dst.input = ip6_forward;
1243 rt->dst.output = ip6_output;
1245 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1246 rt->rt6i_dst.plen = cfg->fc_dst_len;
1247 if (rt->rt6i_dst.plen == 128)
1248 rt->dst.flags |= DST_HOST;
1250 #ifdef CONFIG_IPV6_SUBTREES
1251 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1252 rt->rt6i_src.plen = cfg->fc_src_len;
1253 #endif
1255 rt->rt6i_metric = cfg->fc_metric;
1257 /* We cannot add true routes via loopback here,
1258 they would result in kernel looping; promote them to reject routes
1260 if ((cfg->fc_flags & RTF_REJECT) ||
1261 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK)
1262 && !(cfg->fc_flags&RTF_LOCAL))) {
1263 /* hold loopback dev/idev if we haven't done so. */
1264 if (dev != net->loopback_dev) {
1265 if (dev) {
1266 dev_put(dev);
1267 in6_dev_put(idev);
1269 dev = net->loopback_dev;
1270 dev_hold(dev);
1271 idev = in6_dev_get(dev);
1272 if (!idev) {
1273 err = -ENODEV;
1274 goto out;
1277 rt->dst.output = ip6_pkt_discard_out;
1278 rt->dst.input = ip6_pkt_discard;
1279 rt->dst.error = -ENETUNREACH;
1280 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1281 goto install_route;
1284 if (cfg->fc_flags & RTF_GATEWAY) {
1285 const struct in6_addr *gw_addr;
1286 int gwa_type;
1288 gw_addr = &cfg->fc_gateway;
1289 ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
1290 gwa_type = ipv6_addr_type(gw_addr);
1292 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1293 struct rt6_info *grt;
1295 /* IPv6 strictly inhibits using non-link-local
1296 addresses as a nexthop address.
1297 Otherwise, the router will not be able to send redirects.
1298 That is very good, but in some (rare!) circumstances
1299 (SIT, PtP, NBMA NOARP links) it is handy to allow
1300 some exceptions. --ANK
1302 err = -EINVAL;
1303 if (!(gwa_type&IPV6_ADDR_UNICAST))
1304 goto out;
1306 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1308 err = -EHOSTUNREACH;
1309 if (grt == NULL)
1310 goto out;
1311 if (dev) {
1312 if (dev != grt->rt6i_dev) {
1313 dst_release(&grt->dst);
1314 goto out;
1316 } else {
1317 dev = grt->rt6i_dev;
1318 idev = grt->rt6i_idev;
1319 dev_hold(dev);
1320 in6_dev_hold(grt->rt6i_idev);
1322 if (!(grt->rt6i_flags&RTF_GATEWAY))
1323 err = 0;
1324 dst_release(&grt->dst);
1326 if (err)
1327 goto out;
1329 err = -EINVAL;
1330 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1331 goto out;
1334 err = -ENODEV;
1335 if (dev == NULL)
1336 goto out;
1338 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1339 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1340 err = -EINVAL;
1341 goto out;
1343 ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc);
1344 rt->rt6i_prefsrc.plen = 128;
1345 } else
1346 rt->rt6i_prefsrc.plen = 0;
1348 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1349 struct neighbour *neigh = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1350 if (IS_ERR(neigh)) {
1351 err = PTR_ERR(neigh);
1352 goto out;
1354 dst_set_neighbour(&rt->dst, neigh);
1357 rt->rt6i_flags = cfg->fc_flags;
1359 install_route:
1360 if (cfg->fc_mx) {
1361 struct nlattr *nla;
1362 int remaining;
1364 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1365 int type = nla_type(nla);
1367 if (type) {
1368 if (type > RTAX_MAX) {
1369 err = -EINVAL;
1370 goto out;
1373 dst_metric_set(&rt->dst, type, nla_get_u32(nla));
1378 rt->dst.dev = dev;
1379 rt->rt6i_idev = idev;
1380 rt->rt6i_table = table;
1382 cfg->fc_nlinfo.nl_net = dev_net(dev);
1384 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1386 out:
1387 if (dev)
1388 dev_put(dev);
1389 if (idev)
1390 in6_dev_put(idev);
1391 if (rt)
1392 dst_free(&rt->dst);
1393 return err;
1396 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1398 int err;
1399 struct fib6_table *table;
1400 struct net *net = dev_net(rt->rt6i_dev);
1402 if (rt == net->ipv6.ip6_null_entry)
1403 return -ENOENT;
1405 table = rt->rt6i_table;
1406 write_lock_bh(&table->tb6_lock);
1408 err = fib6_del(rt, info);
1409 dst_release(&rt->dst);
1411 write_unlock_bh(&table->tb6_lock);
1413 return err;
1416 int ip6_del_rt(struct rt6_info *rt)
1418 struct nl_info info = {
1419 .nl_net = dev_net(rt->rt6i_dev),
1421 return __ip6_del_rt(rt, &info);
1424 static int ip6_route_del(struct fib6_config *cfg)
1426 struct fib6_table *table;
1427 struct fib6_node *fn;
1428 struct rt6_info *rt;
1429 int err = -ESRCH;
1431 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1432 if (table == NULL)
1433 return err;
1435 read_lock_bh(&table->tb6_lock);
1437 fn = fib6_locate(&table->tb6_root,
1438 &cfg->fc_dst, cfg->fc_dst_len,
1439 &cfg->fc_src, cfg->fc_src_len);
1441 if (fn) {
1442 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1443 if (cfg->fc_ifindex &&
1444 (rt->rt6i_dev == NULL ||
1445 rt->rt6i_dev->ifindex != cfg->fc_ifindex))
1446 continue;
1447 if (cfg->fc_flags & RTF_GATEWAY &&
1448 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1449 continue;
1450 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1451 continue;
1452 dst_hold(&rt->dst);
1453 read_unlock_bh(&table->tb6_lock);
1455 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1458 read_unlock_bh(&table->tb6_lock);
1460 return err;
1464 * Handle redirects
1466 struct ip6rd_flowi {
1467 struct flowi6 fl6;
1468 struct in6_addr gateway;
1471 static struct rt6_info *__ip6_route_redirect(struct net *net,
1472 struct fib6_table *table,
1473 struct flowi6 *fl6,
1474 int flags)
1476 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1477 struct rt6_info *rt;
1478 struct fib6_node *fn;
1481 * Get the "current" route for this destination and
1482 * check if the redirect has come from an appropriate router.
1484 * RFC 2461 specifies that redirects should only be
1485 * accepted if they come from the nexthop to the target.
1486 * Due to the way the routes are chosen, this notion
1487 * is a bit fuzzy and one might need to check all possible
1488 * routes.
1491 read_lock_bh(&table->tb6_lock);
1492 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1493 restart:
1494 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1496 * Current route is on-link; redirect is always invalid.
1498 * It seems the previous statement is not true. It could
1499 * be a node which regards us as on-link (e.g. proxy ndisc),
1500 * but then the router serving it might decide that we should
1501 * know the truth 8)8) --ANK (980726).
1503 if (rt6_check_expired(rt))
1504 continue;
1505 if (!(rt->rt6i_flags & RTF_GATEWAY))
1506 continue;
1507 if (fl6->flowi6_oif != rt->rt6i_dev->ifindex)
1508 continue;
1509 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1510 continue;
1511 break;
1514 if (!rt)
1515 rt = net->ipv6.ip6_null_entry;
1516 BACKTRACK(net, &fl6->saddr);
1517 out:
1518 dst_hold(&rt->dst);
1520 read_unlock_bh(&table->tb6_lock);
1522 return rt;
1525 static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
1526 const struct in6_addr *src,
1527 const struct in6_addr *gateway,
1528 struct net_device *dev)
1530 int flags = RT6_LOOKUP_F_HAS_SADDR;
1531 struct net *net = dev_net(dev);
1532 struct ip6rd_flowi rdfl = {
1533 .fl6 = {
1534 .flowi6_oif = dev->ifindex,
1535 .daddr = *dest,
1536 .saddr = *src,
1540 ipv6_addr_copy(&rdfl.gateway, gateway);
1542 if (rt6_need_strict(dest))
1543 flags |= RT6_LOOKUP_F_IFACE;
1545 return (struct rt6_info *)fib6_rule_lookup(net, &rdfl.fl6,
1546 flags, __ip6_route_redirect);
1549 void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1550 const struct in6_addr *saddr,
1551 struct neighbour *neigh, u8 *lladdr, int on_link)
1553 struct rt6_info *rt, *nrt = NULL;
1554 struct netevent_redirect netevent;
1555 struct net *net = dev_net(neigh->dev);
1557 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1559 if (rt == net->ipv6.ip6_null_entry) {
1560 if (net_ratelimit())
1561 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1562 "for redirect target\n");
1563 goto out;
1567 * We have finally decided to accept it.
1570 neigh_update(neigh, lladdr, NUD_STALE,
1571 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1572 NEIGH_UPDATE_F_OVERRIDE|
1573 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1574 NEIGH_UPDATE_F_ISROUTER))
1578 * Redirect received -> path was valid.
1579 * Look, redirects are sent only in response to data packets,
1580 * so this nexthop apparently is reachable. --ANK
1582 dst_confirm(&rt->dst);
1584 /* Duplicate redirect: silently ignore. */
1585 if (neigh == dst_get_neighbour_raw(&rt->dst))
1586 goto out;
1588 nrt = ip6_rt_copy(rt);
1589 if (nrt == NULL)
1590 goto out;
1592 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1593 if (on_link)
1594 nrt->rt6i_flags &= ~RTF_GATEWAY;
1596 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1597 nrt->rt6i_dst.plen = 128;
1598 nrt->dst.flags |= DST_HOST;
1600 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1601 dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
1603 if (ip6_ins_rt(nrt))
1604 goto out;
1606 netevent.old = &rt->dst;
1607 netevent.new = &nrt->dst;
1608 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1610 if (rt->rt6i_flags&RTF_CACHE) {
1611 ip6_del_rt(rt);
1612 return;
1615 out:
1616 dst_release(&rt->dst);
1620 * Handle ICMP "packet too big" messages
1621 * i.e. Path MTU discovery
1624 static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
1625 struct net *net, u32 pmtu, int ifindex)
1627 struct rt6_info *rt, *nrt;
1628 int allfrag = 0;
1629 again:
1630 rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
1631 if (rt == NULL)
1632 return;
1634 if (rt6_check_expired(rt)) {
1635 ip6_del_rt(rt);
1636 goto again;
1639 if (pmtu >= dst_mtu(&rt->dst))
1640 goto out;
1642 if (pmtu < IPV6_MIN_MTU) {
1644 * According to RFC 2460, the PMTU is set to the IPv6 Minimum Link
1645 * MTU (1280) and a fragment header should always be included
1646 * after a node receives a Packet Too Big message reporting a PMTU
1647 * less than the IPv6 Minimum Link MTU.
1649 pmtu = IPV6_MIN_MTU;
1650 allfrag = 1;
1653 /* New MTU received -> path was valid.
1654 Packet Too Big messages are sent only in response to data packets,
1655 so this nexthop apparently is reachable. --ANK
1657 dst_confirm(&rt->dst);
1659 /* Host route. If it is static, it would be better
1660 not to override it, but to add a new one, so that
1661 when the cache entry expires the old PMTU
1662 returns automatically.
1664 if (rt->rt6i_flags & RTF_CACHE) {
1665 dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
1666 if (allfrag) {
1667 u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
1668 features |= RTAX_FEATURE_ALLFRAG;
1669 dst_metric_set(&rt->dst, RTAX_FEATURES, features);
1671 dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1672 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1673 goto out;
1676 /* Network route.
1677 Two cases are possible:
1678 1. It is a connected route. Action: COW it.
1679 2. It is a gatewayed or NONEXTHOP route. Action: clone it.
1681 if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
1682 nrt = rt6_alloc_cow(rt, daddr, saddr);
1683 else
1684 nrt = rt6_alloc_clone(rt, daddr);
1686 if (nrt) {
1687 dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
1688 if (allfrag) {
1689 u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
1690 features |= RTAX_FEATURE_ALLFRAG;
1691 dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
1694 /* According to RFC 1981, a PMTU increase shouldn't be probed for
1695 * within 5 minutes; the recommended timer is 10 minutes.
1696 * Here the route expiration time is set to ip6_rt_mtu_expires,
1697 * which is 10 minutes. After 10 minutes the decreased PMTU expires
1698 * and detection of a PMTU increase happens automatically.
1700 dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1701 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1703 ip6_ins_rt(nrt);
1705 out:
1706 dst_release(&rt->dst);
1709 void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
1710 struct net_device *dev, u32 pmtu)
1712 struct net *net = dev_net(dev);
1715 * RFC 1981 states that a node "MUST reduce the size of the packets it
1716 * is sending along the path" that caused the Packet Too Big message.
1717 * Since it's not possible in the general case to determine which
1718 * interface was used to send the original packet, we update the MTU
1719 * on the interface that will be used to send future packets. We also
1720 * update the MTU on the interface that received the Packet Too Big in
1721 * case the original packet was forced out that interface with
1722 * SO_BINDTODEVICE or similar. This is the next best thing to the
1723 * correct behaviour, which would be to update the MTU on all
1724 * interfaces.
1726 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
1727 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
1731 * Misc support functions
1734 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1736 struct net *net = dev_net(ort->rt6i_dev);
1737 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
1738 ort->dst.dev, 0);
1740 if (rt) {
1741 rt->dst.input = ort->dst.input;
1742 rt->dst.output = ort->dst.output;
1744 dst_copy_metrics(&rt->dst, &ort->dst);
1745 rt->dst.error = ort->dst.error;
1746 rt->rt6i_idev = ort->rt6i_idev;
1747 if (rt->rt6i_idev)
1748 in6_dev_hold(rt->rt6i_idev);
1749 rt->dst.lastuse = jiffies;
1750 rt->rt6i_expires = 0;
1752 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
1753 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1754 rt->rt6i_metric = 0;
1756 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1757 #ifdef CONFIG_IPV6_SUBTREES
1758 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1759 #endif
1760 memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1761 rt->rt6i_table = ort->rt6i_table;
1763 return rt;
1766 #ifdef CONFIG_IPV6_ROUTE_INFO
1767 static struct rt6_info *rt6_get_route_info(struct net *net,
1768 const struct in6_addr *prefix, int prefixlen,
1769 const struct in6_addr *gwaddr, int ifindex)
1771 struct fib6_node *fn;
1772 struct rt6_info *rt = NULL;
1773 struct fib6_table *table;
1775 table = fib6_get_table(net, RT6_TABLE_INFO);
1776 if (table == NULL)
1777 return NULL;
1779 write_lock_bh(&table->tb6_lock);
1780 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1781 if (!fn)
1782 goto out;
1784 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1785 if (rt->rt6i_dev->ifindex != ifindex)
1786 continue;
1787 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1788 continue;
1789 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1790 continue;
1791 dst_hold(&rt->dst);
1792 break;
1794 out:
1795 write_unlock_bh(&table->tb6_lock);
1796 return rt;
1799 static struct rt6_info *rt6_add_route_info(struct net *net,
1800 const struct in6_addr *prefix, int prefixlen,
1801 const struct in6_addr *gwaddr, int ifindex,
1802 unsigned pref)
1804 struct fib6_config cfg = {
1805 .fc_table = RT6_TABLE_INFO,
1806 .fc_metric = IP6_RT_PRIO_USER,
1807 .fc_ifindex = ifindex,
1808 .fc_dst_len = prefixlen,
1809 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1810 RTF_UP | RTF_PREF(pref),
1811 .fc_nlinfo.pid = 0,
1812 .fc_nlinfo.nlh = NULL,
1813 .fc_nlinfo.nl_net = net,
1816 ipv6_addr_copy(&cfg.fc_dst, prefix);
1817 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1819 /* We should treat it as a default route if prefix length is 0. */
1820 if (!prefixlen)
1821 cfg.fc_flags |= RTF_DEFAULT;
1823 ip6_route_add(&cfg);
1825 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1827 #endif
1829 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1831 struct rt6_info *rt;
1832 struct fib6_table *table;
1834 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1835 if (table == NULL)
1836 return NULL;
1838 write_lock_bh(&table->tb6_lock);
1839 for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
1840 if (dev == rt->rt6i_dev &&
1841 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1842 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1843 break;
1845 if (rt)
1846 dst_hold(&rt->dst);
1847 write_unlock_bh(&table->tb6_lock);
1848 return rt;
1851 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1852 struct net_device *dev,
1853 unsigned int pref)
1855 struct fib6_config cfg = {
1856 .fc_table = RT6_TABLE_DFLT,
1857 .fc_metric = IP6_RT_PRIO_USER,
1858 .fc_ifindex = dev->ifindex,
1859 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1860 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1861 .fc_nlinfo.pid = 0,
1862 .fc_nlinfo.nlh = NULL,
1863 .fc_nlinfo.nl_net = dev_net(dev),
1866 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1868 ip6_route_add(&cfg);
1870 return rt6_get_dflt_router(gwaddr, dev);
1873 void rt6_purge_dflt_routers(struct net *net)
1875 struct rt6_info *rt;
1876 struct fib6_table *table;
1878 /* NOTE: Keep consistent with rt6_get_dflt_router */
1879 table = fib6_get_table(net, RT6_TABLE_DFLT);
1880 if (table == NULL)
1881 return;
1883 restart:
1884 read_lock_bh(&table->tb6_lock);
1885 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1886 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1887 dst_hold(&rt->dst);
1888 read_unlock_bh(&table->tb6_lock);
1889 ip6_del_rt(rt);
1890 goto restart;
1893 read_unlock_bh(&table->tb6_lock);
1896 static void rtmsg_to_fib6_config(struct net *net,
1897 struct in6_rtmsg *rtmsg,
1898 struct fib6_config *cfg)
1900 memset(cfg, 0, sizeof(*cfg));
1902 cfg->fc_table = RT6_TABLE_MAIN;
1903 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1904 cfg->fc_metric = rtmsg->rtmsg_metric;
1905 cfg->fc_expires = rtmsg->rtmsg_info;
1906 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1907 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1908 cfg->fc_flags = rtmsg->rtmsg_flags;
1910 cfg->fc_nlinfo.nl_net = net;
1912 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
1913 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
1914 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
1917 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1919 struct fib6_config cfg;
1920 struct in6_rtmsg rtmsg;
1921 int err;
1923 switch(cmd) {
1924 case SIOCADDRT: /* Add a route */
1925 case SIOCDELRT: /* Delete a route */
1926 if (!capable(CAP_NET_ADMIN))
1927 return -EPERM;
1928 err = copy_from_user(&rtmsg, arg,
1929 sizeof(struct in6_rtmsg));
1930 if (err)
1931 return -EFAULT;
1933 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
1935 rtnl_lock();
1936 switch (cmd) {
1937 case SIOCADDRT:
1938 err = ip6_route_add(&cfg);
1939 break;
1940 case SIOCDELRT:
1941 err = ip6_route_del(&cfg);
1942 break;
1943 default:
1944 err = -EINVAL;
1946 rtnl_unlock();
1948 return err;
1951 return -EINVAL;
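/*
 * A minimal userspace sketch of the SIOCADDRT path handled above,
 * assuming the usual <sys/ioctl.h>, <arpa/inet.h> and <linux/ipv6_route.h>
 * headers for struct in6_rtmsg and the RTF_* flags.  Requires
 * CAP_NET_ADMIN; error handling is omitted and ifindex is assumed known.
 *
 *	struct in6_rtmsg rt = { 0 };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	inet_pton(AF_INET6, "2001:db8::", &rt.rtmsg_dst);
 *	rt.rtmsg_dst_len = 32;
 *	inet_pton(AF_INET6, "fe80::1", &rt.rtmsg_gateway);
 *	rt.rtmsg_flags = RTF_UP | RTF_GATEWAY;
 *	rt.rtmsg_metric = 1;
 *	rt.rtmsg_ifindex = ifindex;
 *	ioctl(fd, SIOCADDRT, &rt);
 *	close(fd);
 */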
1955 * Drop the packet on the floor
1958 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
1960 int type;
1961 struct dst_entry *dst = skb_dst(skb);
1962 switch (ipstats_mib_noroutes) {
1963 case IPSTATS_MIB_INNOROUTES:
1964 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
1965 if (type == IPV6_ADDR_ANY) {
1966 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
1967 IPSTATS_MIB_INADDRERRORS);
1968 break;
1970 /* FALLTHROUGH */
1971 case IPSTATS_MIB_OUTNOROUTES:
1972 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
1973 ipstats_mib_noroutes);
1974 break;
1976 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
1977 kfree_skb(skb);
1978 return 0;
1981 static int ip6_pkt_discard(struct sk_buff *skb)
1983 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
1986 static int ip6_pkt_discard_out(struct sk_buff *skb)
1988 skb->dev = skb_dst(skb)->dev;
1989 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
1992 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1994 static int ip6_pkt_prohibit(struct sk_buff *skb)
1996 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
1999 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
2001 skb->dev = skb_dst(skb)->dev;
2002 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2005 #endif
2008 * Allocate a dst for local (unicast / anycast) address.
2011 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2012 const struct in6_addr *addr,
2013 int anycast)
2015 struct net *net = dev_net(idev->dev);
2016 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
2017 net->loopback_dev, 0);
2018 struct neighbour *neigh;
2020 if (rt == NULL) {
2021 if (net_ratelimit())
2022 pr_warning("IPv6: Maximum number of routes reached,"
2023 " consider increasing route/max_size.\n");
2024 return ERR_PTR(-ENOMEM);
2027 in6_dev_hold(idev);
2029 rt->dst.flags |= DST_HOST;
2030 rt->dst.input = ip6_input;
2031 rt->dst.output = ip6_output;
2032 rt->rt6i_idev = idev;
2033 rt->dst.obsolete = -1;
2035 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2036 if (anycast)
2037 rt->rt6i_flags |= RTF_ANYCAST;
2038 else
2039 rt->rt6i_flags |= RTF_LOCAL;
2040 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
2041 if (IS_ERR(neigh)) {
2042 dst_free(&rt->dst);
2044 return ERR_CAST(neigh);
2046 dst_set_neighbour(&rt->dst, neigh);
2048 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
2049 rt->rt6i_dst.plen = 128;
2050 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2052 atomic_set(&rt->dst.__refcnt, 1);
2054 return rt;
2057 int ip6_route_get_saddr(struct net *net,
2058 struct rt6_info *rt,
2059 const struct in6_addr *daddr,
2060 unsigned int prefs,
2061 struct in6_addr *saddr)
2063 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
2064 int err = 0;
2065 if (rt->rt6i_prefsrc.plen)
2066 ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr);
2067 else
2068 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2069 daddr, prefs, saddr);
2070 return err;
2073 /* remove deleted ip from prefsrc entries */
2074 struct arg_dev_net_ip {
2075 struct net_device *dev;
2076 struct net *net;
2077 struct in6_addr *addr;
2080 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2082 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2083 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2084 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2086 if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
2087 rt != net->ipv6.ip6_null_entry &&
2088 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2089 /* remove prefsrc entry */
2090 rt->rt6i_prefsrc.plen = 0;
2092 return 0;
2095 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2097 struct net *net = dev_net(ifp->idev->dev);
2098 struct arg_dev_net_ip adni = {
2099 .dev = ifp->idev->dev,
2100 .net = net,
2101 .addr = &ifp->addr,
2103 fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
2106 struct arg_dev_net {
2107 struct net_device *dev;
2108 struct net *net;
2111 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2113 const struct arg_dev_net *adn = arg;
2114 const struct net_device *dev = adn->dev;
2116 if ((rt->rt6i_dev == dev || dev == NULL) &&
2117 rt != adn->net->ipv6.ip6_null_entry) {
2118 RT6_TRACE("deleted by ifdown %p\n", rt);
2119 return -1;
2121 return 0;
2124 void rt6_ifdown(struct net *net, struct net_device *dev)
2126 struct arg_dev_net adn = {
2127 .dev = dev,
2128 .net = net,
2131 fib6_clean_all(net, fib6_ifdown, 0, &adn);
2132 icmp6_clean_all(fib6_ifdown, &adn);
2135 struct rt6_mtu_change_arg
2137 struct net_device *dev;
2138 unsigned mtu;
2141 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2143 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2144 struct inet6_dev *idev;
2146 /* In IPv6, PMTU discovery is not optional,
2147 so the RTAX_MTU lock cannot disable it.
2148 We still use this lock to block changes
2149 caused by addrconf/ndisc.
2152 idev = __in6_dev_get(arg->dev);
2153 if (idev == NULL)
2154 return 0;
2156 /* For an administrative MTU increase, there is no way to discover
2157 an IPv6 PMTU increase, so the PMTU must be updated here.
2158 Since RFC 1981 doesn't cover administrative MTU increases,
2159 updating the PMTU on increase is a MUST (e.g. jumbo frames).
2162 If the new MTU is less than the route PMTU, the new MTU will be the
2163 lowest MTU in the path; update the route PMTU to reflect the PMTU
2164 decrease. If the new MTU is greater than the route PMTU, and the
2165 old MTU was the lowest MTU in the path, update the route PMTU
2166 to reflect the increase. In this case, if the other nodes' MTU
2167 is also the lowest in the path, a Packet Too Big message will lead
2168 to PMTU discovery.
2170 if (rt->rt6i_dev == arg->dev &&
2171 !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2172 (dst_mtu(&rt->dst) >= arg->mtu ||
2173 (dst_mtu(&rt->dst) < arg->mtu &&
2174 dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2175 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2177 return 0;
2180 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
2182 struct rt6_mtu_change_arg arg = {
2183 .dev = dev,
2184 .mtu = mtu,
2187 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
2190 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2191 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2192 [RTA_OIF] = { .type = NLA_U32 },
2193 [RTA_IIF] = { .type = NLA_U32 },
2194 [RTA_PRIORITY] = { .type = NLA_U32 },
2195 [RTA_METRICS] = { .type = NLA_NESTED },
2198 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2199 struct fib6_config *cfg)
2201 struct rtmsg *rtm;
2202 struct nlattr *tb[RTA_MAX+1];
2203 int err;
2205 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2206 if (err < 0)
2207 goto errout;
2209 err = -EINVAL;
2210 rtm = nlmsg_data(nlh);
2211 memset(cfg, 0, sizeof(*cfg));
2213 cfg->fc_table = rtm->rtm_table;
2214 cfg->fc_dst_len = rtm->rtm_dst_len;
2215 cfg->fc_src_len = rtm->rtm_src_len;
2216 cfg->fc_flags = RTF_UP;
2217 cfg->fc_protocol = rtm->rtm_protocol;
2219 if (rtm->rtm_type == RTN_UNREACHABLE)
2220 cfg->fc_flags |= RTF_REJECT;
2222 if (rtm->rtm_type == RTN_LOCAL)
2223 cfg->fc_flags |= RTF_LOCAL;
2225 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
2226 cfg->fc_nlinfo.nlh = nlh;
2227 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2229 if (tb[RTA_GATEWAY]) {
2230 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2231 cfg->fc_flags |= RTF_GATEWAY;
2234 if (tb[RTA_DST]) {
2235 int plen = (rtm->rtm_dst_len + 7) >> 3;
2237 if (nla_len(tb[RTA_DST]) < plen)
2238 goto errout;
2240 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2243 if (tb[RTA_SRC]) {
2244 int plen = (rtm->rtm_src_len + 7) >> 3;
2246 if (nla_len(tb[RTA_SRC]) < plen)
2247 goto errout;
2249 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2252 if (tb[RTA_PREFSRC])
2253 nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2255 if (tb[RTA_OIF])
2256 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2258 if (tb[RTA_PRIORITY])
2259 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2261 if (tb[RTA_METRICS]) {
2262 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2263 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2266 if (tb[RTA_TABLE])
2267 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2269 err = 0;
2270 errout:
2271 return err;
2274 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2276 struct fib6_config cfg;
2277 int err;
2279 err = rtm_to_fib6_config(skb, nlh, &cfg);
2280 if (err < 0)
2281 return err;
2283 return ip6_route_del(&cfg);
2286 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2288 struct fib6_config cfg;
2289 int err;
2291 err = rtm_to_fib6_config(skb, nlh, &cfg);
2292 if (err < 0)
2293 return err;
2295 return ip6_route_add(&cfg);
2298 static inline size_t rt6_nlmsg_size(void)
2300 return NLMSG_ALIGN(sizeof(struct rtmsg))
2301 + nla_total_size(16) /* RTA_SRC */
2302 + nla_total_size(16) /* RTA_DST */
2303 + nla_total_size(16) /* RTA_GATEWAY */
2304 + nla_total_size(16) /* RTA_PREFSRC */
2305 + nla_total_size(4) /* RTA_TABLE */
2306 + nla_total_size(4) /* RTA_IIF */
2307 + nla_total_size(4) /* RTA_OIF */
2308 + nla_total_size(4) /* RTA_PRIORITY */
2309 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2310 + nla_total_size(sizeof(struct rta_cacheinfo));
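/* Editorial note: rt6_nlmsg_size() is a worst-case bound; every attribute
 * rt6_fill_node() may emit is counted whether or not a given route carries
 * it.  inet6_rt_notify() below sizes the notification skb with it, e.g.
 *
 *	skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
 *
 * which is why an -EMSGSIZE return from rt6_fill_node() there indicates a
 * bug in this estimate rather than a transient allocation failure.
 */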
2313 static int rt6_fill_node(struct net *net,
2314 struct sk_buff *skb, struct rt6_info *rt,
2315 struct in6_addr *dst, struct in6_addr *src,
2316 int iif, int type, u32 pid, u32 seq,
2317 int prefix, int nowait, unsigned int flags)
2319 struct rtmsg *rtm;
2320 struct nlmsghdr *nlh;
2321 long expires;
2322 u32 table;
2323 struct neighbour *n;
2325 if (prefix) { /* user wants prefix routes only */
2326 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2327 /* success since this is not a prefix route */
2328 return 1;
2332 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2333 if (nlh == NULL)
2334 return -EMSGSIZE;
2336 rtm = nlmsg_data(nlh);
2337 rtm->rtm_family = AF_INET6;
2338 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2339 rtm->rtm_src_len = rt->rt6i_src.plen;
2340 rtm->rtm_tos = 0;
2341 if (rt->rt6i_table)
2342 table = rt->rt6i_table->tb6_id;
2343 else
2344 table = RT6_TABLE_UNSPEC;
2345 rtm->rtm_table = table;
2346 NLA_PUT_U32(skb, RTA_TABLE, table);
2347 if (rt->rt6i_flags&RTF_REJECT)
2348 rtm->rtm_type = RTN_UNREACHABLE;
2349 else if (rt->rt6i_flags&RTF_LOCAL)
2350 rtm->rtm_type = RTN_LOCAL;
2351 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
2352 rtm->rtm_type = RTN_LOCAL;
2353 else
2354 rtm->rtm_type = RTN_UNICAST;
2355 rtm->rtm_flags = 0;
2356 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2357 rtm->rtm_protocol = rt->rt6i_protocol;
2358 if (rt->rt6i_flags&RTF_DYNAMIC)
2359 rtm->rtm_protocol = RTPROT_REDIRECT;
2360 else if (rt->rt6i_flags & RTF_ADDRCONF)
2361 rtm->rtm_protocol = RTPROT_KERNEL;
2362 else if (rt->rt6i_flags&RTF_DEFAULT)
2363 rtm->rtm_protocol = RTPROT_RA;
2365 if (rt->rt6i_flags&RTF_CACHE)
2366 rtm->rtm_flags |= RTM_F_CLONED;
2368 if (dst) {
2369 NLA_PUT(skb, RTA_DST, 16, dst);
2370 rtm->rtm_dst_len = 128;
2371 } else if (rtm->rtm_dst_len)
2372 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
2373 #ifdef CONFIG_IPV6_SUBTREES
2374 if (src) {
2375 NLA_PUT(skb, RTA_SRC, 16, src);
2376 rtm->rtm_src_len = 128;
2377 } else if (rtm->rtm_src_len)
2378 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2379 #endif
2380 if (iif) {
2381 #ifdef CONFIG_IPV6_MROUTE
2382 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2383 int err = ip6mr_get_route(net, skb, rtm, nowait);
2384 if (err <= 0) {
2385 if (!nowait) {
2386 if (err == 0)
2387 return 0;
2388 goto nla_put_failure;
2389 } else {
2390 if (err == -EMSGSIZE)
2391 goto nla_put_failure;
2394 } else
2395 #endif
2396 NLA_PUT_U32(skb, RTA_IIF, iif);
2397 } else if (dst) {
2398 struct in6_addr saddr_buf;
2399 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
2400 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2403 if (rt->rt6i_prefsrc.plen) {
2404 struct in6_addr saddr_buf;
2405 ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr);
2406 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2409 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2410 goto nla_put_failure;
2412 rcu_read_lock();
2413 n = dst_get_neighbour(&rt->dst);
2414 if (n) {
2415 if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
2416 rcu_read_unlock();
2417 goto nla_put_failure;
2420 rcu_read_unlock();
2422 if (rt->dst.dev)
2423 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
2425 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2427 if (!(rt->rt6i_flags & RTF_EXPIRES))
2428 expires = 0;
2429 else if (rt->rt6i_expires - jiffies < INT_MAX)
2430 expires = rt->rt6i_expires - jiffies;
2431 else
2432 expires = INT_MAX;
2434 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
2435 expires, rt->dst.error) < 0)
2436 goto nla_put_failure;
2438 return nlmsg_end(skb, nlh);
2440 nla_put_failure:
2441 nlmsg_cancel(skb, nlh);
2442 return -EMSGSIZE;
2445 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2447 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2448 int prefix;
2450 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2451 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2452 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2453 } else
2454 prefix = 0;
2456 return rt6_fill_node(arg->net,
2457 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2458 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2459 prefix, 0, NLM_F_MULTI);
2462 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2464 struct net *net = sock_net(in_skb->sk);
2465 struct nlattr *tb[RTA_MAX+1];
2466 struct rt6_info *rt;
2467 struct sk_buff *skb;
2468 struct rtmsg *rtm;
2469 struct flowi6 fl6;
2470 int err, iif = 0;
2472 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2473 if (err < 0)
2474 goto errout;
2476 err = -EINVAL;
2477 memset(&fl6, 0, sizeof(fl6));
2479 if (tb[RTA_SRC]) {
2480 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2481 goto errout;
2483 ipv6_addr_copy(&fl6.saddr, nla_data(tb[RTA_SRC]));
2486 if (tb[RTA_DST]) {
2487 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2488 goto errout;
2490 ipv6_addr_copy(&fl6.daddr, nla_data(tb[RTA_DST]));
2493 if (tb[RTA_IIF])
2494 iif = nla_get_u32(tb[RTA_IIF]);
2496 if (tb[RTA_OIF])
2497 fl6.flowi6_oif = nla_get_u32(tb[RTA_OIF]);
2499 if (iif) {
2500 struct net_device *dev;
2501 dev = __dev_get_by_index(net, iif);
2502 if (!dev) {
2503 err = -ENODEV;
2504 goto errout;
2508 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2509 if (skb == NULL) {
2510 err = -ENOBUFS;
2511 goto errout;
2514 /* Reserve room for dummy headers; this skb can pass
2515 through a good chunk of the routing engine.
2517 skb_reset_mac_header(skb);
2518 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2520 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl6);
2521 skb_dst_set(skb, &rt->dst);
2523 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2524 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2525 nlh->nlmsg_seq, 0, 0, 0);
2526 if (err < 0) {
2527 kfree_skb(skb);
2528 goto errout;
2531 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2532 errout:
2533 return err;
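/* Editorial sketch (hypothetical layout, not part of the original source):
 * the shape of a minimal userspace RTM_GETROUTE request that
 * inet6_rtm_getroute() above would service -- an rtmsg followed by one
 * RTA_DST attribute.  The struct name and field choices are illustrative.
 */
#if 0
struct rt6_getroute_req {
	struct nlmsghdr	nlh;	/* nlmsg_type = RTM_GETROUTE, nlmsg_flags = NLM_F_REQUEST */
	struct rtmsg	rtm;	/* rtm_family = AF_INET6 */
	struct rtattr	rta;	/* rta_type = RTA_DST, rta_len = RTA_LENGTH(16) */
	struct in6_addr	dst;	/* destination whose route is being queried */
};
/* The handler copies RTA_DST into fl6.daddr (and honours optional RTA_SRC,
 * RTA_IIF and RTA_OIF), resolves the route with ip6_route_output(), and
 * unicasts an RTM_NEWROUTE reply built by rt6_fill_node() to the sender.
 */
#endif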
2536 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2538 struct sk_buff *skb;
2539 struct net *net = info->nl_net;
2540 u32 seq;
2541 int err;
2543 err = -ENOBUFS;
2544 seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;
2546 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2547 if (skb == NULL)
2548 goto errout;
2550 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2551 event, info->pid, seq, 0, 0, 0);
2552 if (err < 0) {
2553 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2554 WARN_ON(err == -EMSGSIZE);
2555 kfree_skb(skb);
2556 goto errout;
2558 rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
2559 info->nlh, gfp_any());
2560 return;
2561 errout:
2562 if (err < 0)
2563 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2566 static int ip6_route_dev_notify(struct notifier_block *this,
2567 unsigned long event, void *data)
2569 struct net_device *dev = (struct net_device *)data;
2570 struct net *net = dev_net(dev);
2572 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2573 net->ipv6.ip6_null_entry->dst.dev = dev;
2574 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2575 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2576 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2577 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2578 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2579 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2580 #endif
2583 return NOTIFY_OK;
2587 * /proc
2590 #ifdef CONFIG_PROC_FS
2592 struct rt6_proc_arg
2594 char *buffer;
2595 int offset;
2596 int length;
2597 int skip;
2598 int len;
2601 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2603 struct seq_file *m = p_arg;
2604 struct neighbour *n;
2606 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2608 #ifdef CONFIG_IPV6_SUBTREES
2609 seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
2610 #else
2611 seq_puts(m, "00000000000000000000000000000000 00 ");
2612 #endif
2613 rcu_read_lock();
2614 n = dst_get_neighbour(&rt->dst);
2615 if (n) {
2616 seq_printf(m, "%pi6", n->primary_key);
2617 } else {
2618 seq_puts(m, "00000000000000000000000000000000");
2620 rcu_read_unlock();
2621 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2622 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2623 rt->dst.__use, rt->rt6i_flags,
2624 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2625 return 0;
2628 static int ipv6_route_show(struct seq_file *m, void *v)
2630 struct net *net = (struct net *)m->private;
2631 fib6_clean_all(net, rt6_info_route, 0, m);
2632 return 0;
2635 static int ipv6_route_open(struct inode *inode, struct file *file)
2637 return single_open_net(inode, file, ipv6_route_show);
2640 static const struct file_operations ipv6_route_proc_fops = {
2641 .owner = THIS_MODULE,
2642 .open = ipv6_route_open,
2643 .read = seq_read,
2644 .llseek = seq_lseek,
2645 .release = single_release_net,
2648 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2650 struct net *net = (struct net *)seq->private;
2651 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2652 net->ipv6.rt6_stats->fib_nodes,
2653 net->ipv6.rt6_stats->fib_route_nodes,
2654 net->ipv6.rt6_stats->fib_rt_alloc,
2655 net->ipv6.rt6_stats->fib_rt_entries,
2656 net->ipv6.rt6_stats->fib_rt_cache,
2657 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2658 net->ipv6.rt6_stats->fib_discarded_routes);
2660 return 0;
2663 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2665 return single_open_net(inode, file, rt6_stats_seq_show);
2668 static const struct file_operations rt6_stats_seq_fops = {
2669 .owner = THIS_MODULE,
2670 .open = rt6_stats_seq_open,
2671 .read = seq_read,
2672 .llseek = seq_lseek,
2673 .release = single_release_net,
2675 #endif /* CONFIG_PROC_FS */
2677 #ifdef CONFIG_SYSCTL
2679 static
2680 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2681 void __user *buffer, size_t *lenp, loff_t *ppos)
2683 struct net *net;
2684 int delay;
2685 if (!write)
2686 return -EINVAL;
2688 net = (struct net *)ctl->extra1;
2689 delay = net->ipv6.sysctl.flush_delay;
2690 proc_dointvec(ctl, write, buffer, lenp, ppos);
2691 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2692 return 0;
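/* Editorial note: this handler backs the write-only "flush" entry in the
 * table below.  It snapshots the current flush_delay, lets proc_dointvec()
 * store the newly written value, and kicks fib6_run_gc() with the snapshot
 * (non-positive delays are passed as ~0UL).  In practice, assuming the
 * usual net.ipv6.route sysctl path, "sysctl -w net.ipv6.route.flush=1"
 * triggers an immediate garbage-collection pass over the FIB.
 */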
2695 ctl_table ipv6_route_table_template[] = {
2697 .procname = "flush",
2698 .data = &init_net.ipv6.sysctl.flush_delay,
2699 .maxlen = sizeof(int),
2700 .mode = 0200,
2701 .proc_handler = ipv6_sysctl_rtcache_flush
2704 .procname = "gc_thresh",
2705 .data = &ip6_dst_ops_template.gc_thresh,
2706 .maxlen = sizeof(int),
2707 .mode = 0644,
2708 .proc_handler = proc_dointvec,
2711 .procname = "max_size",
2712 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2713 .maxlen = sizeof(int),
2714 .mode = 0644,
2715 .proc_handler = proc_dointvec,
2718 .procname = "gc_min_interval",
2719 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2720 .maxlen = sizeof(int),
2721 .mode = 0644,
2722 .proc_handler = proc_dointvec_jiffies,
2725 .procname = "gc_timeout",
2726 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2727 .maxlen = sizeof(int),
2728 .mode = 0644,
2729 .proc_handler = proc_dointvec_jiffies,
2732 .procname = "gc_interval",
2733 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2734 .maxlen = sizeof(int),
2735 .mode = 0644,
2736 .proc_handler = proc_dointvec_jiffies,
2739 .procname = "gc_elasticity",
2740 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2741 .maxlen = sizeof(int),
2742 .mode = 0644,
2743 .proc_handler = proc_dointvec,
2746 .procname = "mtu_expires",
2747 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2748 .maxlen = sizeof(int),
2749 .mode = 0644,
2750 .proc_handler = proc_dointvec_jiffies,
2753 .procname = "min_adv_mss",
2754 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2755 .maxlen = sizeof(int),
2756 .mode = 0644,
2757 .proc_handler = proc_dointvec,
2760 .procname = "gc_min_interval_ms",
2761 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2762 .maxlen = sizeof(int),
2763 .mode = 0644,
2764 .proc_handler = proc_dointvec_ms_jiffies,
2769 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2771 struct ctl_table *table;
2773 table = kmemdup(ipv6_route_table_template,
2774 sizeof(ipv6_route_table_template),
2775 GFP_KERNEL);
2777 if (table) {
2778 table[0].data = &net->ipv6.sysctl.flush_delay;
2779 table[0].extra1 = net;
2780 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2781 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2782 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2783 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2784 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2785 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2786 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2787 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2788 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2791 return table;
2793 #endif
2795 static int __net_init ip6_route_net_init(struct net *net)
2797 int ret = -ENOMEM;
2799 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
2800 sizeof(net->ipv6.ip6_dst_ops));
2802 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
2803 goto out_ip6_dst_ops;
2805 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2806 sizeof(*net->ipv6.ip6_null_entry),
2807 GFP_KERNEL);
2808 if (!net->ipv6.ip6_null_entry)
2809 goto out_ip6_dst_entries;
2810 net->ipv6.ip6_null_entry->dst.path =
2811 (struct dst_entry *)net->ipv6.ip6_null_entry;
2812 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2813 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
2814 ip6_template_metrics, true);
2816 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2817 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2818 sizeof(*net->ipv6.ip6_prohibit_entry),
2819 GFP_KERNEL);
2820 if (!net->ipv6.ip6_prohibit_entry)
2821 goto out_ip6_null_entry;
2822 net->ipv6.ip6_prohibit_entry->dst.path =
2823 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2824 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2825 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
2826 ip6_template_metrics, true);
2828 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2829 sizeof(*net->ipv6.ip6_blk_hole_entry),
2830 GFP_KERNEL);
2831 if (!net->ipv6.ip6_blk_hole_entry)
2832 goto out_ip6_prohibit_entry;
2833 net->ipv6.ip6_blk_hole_entry->dst.path =
2834 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2835 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2836 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
2837 ip6_template_metrics, true);
2838 #endif
2840 net->ipv6.sysctl.flush_delay = 0;
2841 net->ipv6.sysctl.ip6_rt_max_size = 4096;
2842 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
2843 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
2844 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
2845 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
2846 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
2847 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
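/* Editorial note: these are the per-netns boot defaults for the knobs that
 * ipv6_route_sysctl_init() above rewires into the route sysctl table, e.g.
 * ip6_rt_max_size backs "max_size" and ip6_rt_gc_timeout backs
 * "gc_timeout" (assuming the table is registered under net.ipv6.route).
 */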
2849 #ifdef CONFIG_PROC_FS
2850 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2851 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2852 #endif
2853 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2855 ret = 0;
2856 out:
2857 return ret;
2859 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2860 out_ip6_prohibit_entry:
2861 kfree(net->ipv6.ip6_prohibit_entry);
2862 out_ip6_null_entry:
2863 kfree(net->ipv6.ip6_null_entry);
2864 #endif
2865 out_ip6_dst_entries:
2866 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2867 out_ip6_dst_ops:
2868 goto out;
2871 static void __net_exit ip6_route_net_exit(struct net *net)
2873 #ifdef CONFIG_PROC_FS
2874 proc_net_remove(net, "ipv6_route");
2875 proc_net_remove(net, "rt6_stats");
2876 #endif
2877 kfree(net->ipv6.ip6_null_entry);
2878 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2879 kfree(net->ipv6.ip6_prohibit_entry);
2880 kfree(net->ipv6.ip6_blk_hole_entry);
2881 #endif
2882 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2885 static struct pernet_operations ip6_route_net_ops = {
2886 .init = ip6_route_net_init,
2887 .exit = ip6_route_net_exit,
2890 static struct notifier_block ip6_route_dev_notifier = {
2891 .notifier_call = ip6_route_dev_notify,
2892 .priority = 0,
2895 int __init ip6_route_init(void)
2897 int ret;
2899 ret = -ENOMEM;
2900 ip6_dst_ops_template.kmem_cachep =
2901 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2902 SLAB_HWCACHE_ALIGN, NULL);
2903 if (!ip6_dst_ops_template.kmem_cachep)
2904 goto out;
2906 ret = dst_entries_init(&ip6_dst_blackhole_ops);
2907 if (ret)
2908 goto out_kmem_cache;
2910 ret = register_pernet_subsys(&ip6_route_net_ops);
2911 if (ret)
2912 goto out_dst_entries;
2914 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
2916 /* The loopback device is registered before this code runs, so the
2917 * loopback reference in rt6_info is not taken automatically; do it
2918 * manually for init_net. */
2919 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
2920 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2921 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2922 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
2923 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2924 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
2925 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2926 #endif
2927 ret = fib6_init();
2928 if (ret)
2929 goto out_register_subsys;
2931 ret = xfrm6_init();
2932 if (ret)
2933 goto out_fib6_init;
2935 ret = fib6_rules_init();
2936 if (ret)
2937 goto xfrm6_init;
2939 ret = -ENOBUFS;
2940 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) ||
2941 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) ||
2942 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
2943 goto fib6_rules_init;
2945 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
2946 if (ret)
2947 goto fib6_rules_init;
2949 out:
2950 return ret;
2952 fib6_rules_init:
2953 fib6_rules_cleanup();
2954 xfrm6_init:
2955 xfrm6_fini();
2956 out_fib6_init:
2957 fib6_gc_cleanup();
2958 out_register_subsys:
2959 unregister_pernet_subsys(&ip6_route_net_ops);
2960 out_dst_entries:
2961 dst_entries_destroy(&ip6_dst_blackhole_ops);
2962 out_kmem_cache:
2963 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2964 goto out;
2967 void ip6_route_cleanup(void)
2969 unregister_netdevice_notifier(&ip6_route_dev_notifier);
2970 fib6_rules_cleanup();
2971 xfrm6_fini();
2972 fib6_gc_cleanup();
2973 unregister_pernet_subsys(&ip6_route_net_ops);
2974 dst_entries_destroy(&ip6_dst_blackhole_ops);
2975 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);