/*
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *		Fixed routing subtrees.
 */
27 #include <linux/capability.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/times.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/route.h>
35 #include <linux/netdevice.h>
36 #include <linux/in6.h>
37 #include <linux/mroute6.h>
38 #include <linux/init.h>
39 #include <linux/if_arp.h>
40 #include <linux/proc_fs.h>
41 #include <linux/seq_file.h>
42 #include <linux/nsproxy.h>
43 #include <linux/slab.h>
44 #include <net/net_namespace.h>
47 #include <net/ip6_fib.h>
48 #include <net/ip6_route.h>
49 #include <net/ndisc.h>
50 #include <net/addrconf.h>
52 #include <linux/rtnetlink.h>
55 #include <net/netevent.h>
56 #include <net/netlink.h>
58 #include <asm/uaccess.h>
61 #include <linux/sysctl.h>
/* Set to 3 to get tracing. */
#define RT6_DEBUG 2

#if RT6_DEBUG >= 3
/* Verbose debugging: RDBG takes a parenthesized printk argument list,
 * RT6_TRACE is a printf-style KERN_DEBUG trace. */
#define RDBG(x) printk x
#define RT6_TRACE(x...) printk(KERN_DEBUG x)
#else
/* Tracing disabled: both macros compile away to nothing. */
#define RDBG(x)
#define RT6_TRACE(x...) do { ; } while (0)
#endif
75 static struct rt6_info
*ip6_rt_copy(const struct rt6_info
*ort
,
76 const struct in6_addr
*dest
);
77 static struct dst_entry
*ip6_dst_check(struct dst_entry
*dst
, u32 cookie
);
78 static unsigned int ip6_default_advmss(const struct dst_entry
*dst
);
79 static unsigned int ip6_default_mtu(const struct dst_entry
*dst
);
80 static struct dst_entry
*ip6_negative_advice(struct dst_entry
*);
81 static void ip6_dst_destroy(struct dst_entry
*);
82 static void ip6_dst_ifdown(struct dst_entry
*,
83 struct net_device
*dev
, int how
);
84 static int ip6_dst_gc(struct dst_ops
*ops
);
86 static int ip6_pkt_discard(struct sk_buff
*skb
);
87 static int ip6_pkt_discard_out(struct sk_buff
*skb
);
88 static void ip6_link_failure(struct sk_buff
*skb
);
89 static void ip6_rt_update_pmtu(struct dst_entry
*dst
, u32 mtu
);
91 #ifdef CONFIG_IPV6_ROUTE_INFO
92 static struct rt6_info
*rt6_add_route_info(struct net
*net
,
93 const struct in6_addr
*prefix
, int prefixlen
,
94 const struct in6_addr
*gwaddr
, int ifindex
,
96 static struct rt6_info
*rt6_get_route_info(struct net
*net
,
97 const struct in6_addr
*prefix
, int prefixlen
,
98 const struct in6_addr
*gwaddr
, int ifindex
);
101 static u32
*ipv6_cow_metrics(struct dst_entry
*dst
, unsigned long old
)
103 struct rt6_info
*rt
= (struct rt6_info
*) dst
;
104 struct inet_peer
*peer
;
108 rt6_bind_peer(rt
, 1);
110 peer
= rt
->rt6i_peer
;
112 u32
*old_p
= __DST_METRICS_PTR(old
);
113 unsigned long prev
, new;
116 if (inet_metrics_new(peer
))
117 memcpy(p
, old_p
, sizeof(u32
) * RTAX_MAX
);
119 new = (unsigned long) p
;
120 prev
= cmpxchg(&dst
->_metrics
, old
, new);
123 p
= __DST_METRICS_PTR(prev
);
124 if (prev
& DST_METRICS_READ_ONLY
)
131 static struct neighbour
*ip6_neigh_lookup(const struct dst_entry
*dst
, const void *daddr
)
133 return __neigh_lookup_errno(&nd_tbl
, daddr
, dst
->dev
);
136 static struct dst_ops ip6_dst_ops_template
= {
138 .protocol
= cpu_to_be16(ETH_P_IPV6
),
141 .check
= ip6_dst_check
,
142 .default_advmss
= ip6_default_advmss
,
143 .default_mtu
= ip6_default_mtu
,
144 .cow_metrics
= ipv6_cow_metrics
,
145 .destroy
= ip6_dst_destroy
,
146 .ifdown
= ip6_dst_ifdown
,
147 .negative_advice
= ip6_negative_advice
,
148 .link_failure
= ip6_link_failure
,
149 .update_pmtu
= ip6_rt_update_pmtu
,
150 .local_out
= __ip6_local_out
,
151 .neigh_lookup
= ip6_neigh_lookup
,
154 static unsigned int ip6_blackhole_default_mtu(const struct dst_entry
*dst
)
159 static void ip6_rt_blackhole_update_pmtu(struct dst_entry
*dst
, u32 mtu
)
163 static u32
*ip6_rt_blackhole_cow_metrics(struct dst_entry
*dst
,
169 static struct dst_ops ip6_dst_blackhole_ops
= {
171 .protocol
= cpu_to_be16(ETH_P_IPV6
),
172 .destroy
= ip6_dst_destroy
,
173 .check
= ip6_dst_check
,
174 .default_mtu
= ip6_blackhole_default_mtu
,
175 .default_advmss
= ip6_default_advmss
,
176 .update_pmtu
= ip6_rt_blackhole_update_pmtu
,
177 .cow_metrics
= ip6_rt_blackhole_cow_metrics
,
178 .neigh_lookup
= ip6_neigh_lookup
,
181 static const u32 ip6_template_metrics
[RTAX_MAX
] = {
182 [RTAX_HOPLIMIT
- 1] = 255,
185 static struct rt6_info ip6_null_entry_template
= {
187 .__refcnt
= ATOMIC_INIT(1),
190 .error
= -ENETUNREACH
,
191 .input
= ip6_pkt_discard
,
192 .output
= ip6_pkt_discard_out
,
194 .rt6i_flags
= (RTF_REJECT
| RTF_NONEXTHOP
),
195 .rt6i_protocol
= RTPROT_KERNEL
,
196 .rt6i_metric
= ~(u32
) 0,
197 .rt6i_ref
= ATOMIC_INIT(1),
200 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
202 static int ip6_pkt_prohibit(struct sk_buff
*skb
);
203 static int ip6_pkt_prohibit_out(struct sk_buff
*skb
);
205 static struct rt6_info ip6_prohibit_entry_template
= {
207 .__refcnt
= ATOMIC_INIT(1),
211 .input
= ip6_pkt_prohibit
,
212 .output
= ip6_pkt_prohibit_out
,
214 .rt6i_flags
= (RTF_REJECT
| RTF_NONEXTHOP
),
215 .rt6i_protocol
= RTPROT_KERNEL
,
216 .rt6i_metric
= ~(u32
) 0,
217 .rt6i_ref
= ATOMIC_INIT(1),
220 static struct rt6_info ip6_blk_hole_entry_template
= {
222 .__refcnt
= ATOMIC_INIT(1),
226 .input
= dst_discard
,
227 .output
= dst_discard
,
229 .rt6i_flags
= (RTF_REJECT
| RTF_NONEXTHOP
),
230 .rt6i_protocol
= RTPROT_KERNEL
,
231 .rt6i_metric
= ~(u32
) 0,
232 .rt6i_ref
= ATOMIC_INIT(1),
237 /* allocate dst with ip6_dst_ops */
238 static inline struct rt6_info
*ip6_dst_alloc(struct dst_ops
*ops
,
239 struct net_device
*dev
,
242 struct rt6_info
*rt
= dst_alloc(ops
, dev
, 0, 0, flags
);
244 memset(&rt
->rt6i_table
, 0, sizeof(*rt
) - sizeof(struct dst_entry
));
249 static void ip6_dst_destroy(struct dst_entry
*dst
)
251 struct rt6_info
*rt
= (struct rt6_info
*)dst
;
252 struct inet6_dev
*idev
= rt
->rt6i_idev
;
253 struct inet_peer
*peer
= rt
->rt6i_peer
;
256 rt
->rt6i_idev
= NULL
;
260 rt
->rt6i_peer
= NULL
;
265 static atomic_t __rt6_peer_genid
= ATOMIC_INIT(0);
267 static u32
rt6_peer_genid(void)
269 return atomic_read(&__rt6_peer_genid
);
272 void rt6_bind_peer(struct rt6_info
*rt
, int create
)
274 struct inet_peer
*peer
;
276 peer
= inet_getpeer_v6(&rt
->rt6i_dst
.addr
, create
);
277 if (peer
&& cmpxchg(&rt
->rt6i_peer
, NULL
, peer
) != NULL
)
280 rt
->rt6i_peer_genid
= rt6_peer_genid();
283 static void ip6_dst_ifdown(struct dst_entry
*dst
, struct net_device
*dev
,
286 struct rt6_info
*rt
= (struct rt6_info
*)dst
;
287 struct inet6_dev
*idev
= rt
->rt6i_idev
;
288 struct net_device
*loopback_dev
=
289 dev_net(dev
)->loopback_dev
;
291 if (dev
!= loopback_dev
&& idev
!= NULL
&& idev
->dev
== dev
) {
292 struct inet6_dev
*loopback_idev
=
293 in6_dev_get(loopback_dev
);
294 if (loopback_idev
!= NULL
) {
295 rt
->rt6i_idev
= loopback_idev
;
301 static __inline__
int rt6_check_expired(const struct rt6_info
*rt
)
303 return (rt
->rt6i_flags
& RTF_EXPIRES
) &&
304 time_after(jiffies
, rt
->rt6i_expires
);
307 static inline int rt6_need_strict(const struct in6_addr
*daddr
)
309 return ipv6_addr_type(daddr
) &
310 (IPV6_ADDR_MULTICAST
| IPV6_ADDR_LINKLOCAL
| IPV6_ADDR_LOOPBACK
);
314 * Route lookup. Any table->tb6_lock is implied.
317 static inline struct rt6_info
*rt6_device_match(struct net
*net
,
319 const struct in6_addr
*saddr
,
323 struct rt6_info
*local
= NULL
;
324 struct rt6_info
*sprt
;
326 if (!oif
&& ipv6_addr_any(saddr
))
329 for (sprt
= rt
; sprt
; sprt
= sprt
->dst
.rt6_next
) {
330 struct net_device
*dev
= sprt
->rt6i_dev
;
333 if (dev
->ifindex
== oif
)
335 if (dev
->flags
& IFF_LOOPBACK
) {
336 if (sprt
->rt6i_idev
== NULL
||
337 sprt
->rt6i_idev
->dev
->ifindex
!= oif
) {
338 if (flags
& RT6_LOOKUP_F_IFACE
&& oif
)
340 if (local
&& (!oif
||
341 local
->rt6i_idev
->dev
->ifindex
== oif
))
347 if (ipv6_chk_addr(net
, saddr
, dev
,
348 flags
& RT6_LOOKUP_F_IFACE
))
357 if (flags
& RT6_LOOKUP_F_IFACE
)
358 return net
->ipv6
.ip6_null_entry
;
364 #ifdef CONFIG_IPV6_ROUTER_PREF
365 static void rt6_probe(struct rt6_info
*rt
)
367 struct neighbour
*neigh
= rt
? dst_get_neighbour(&rt
->dst
) : NULL
;
369 * Okay, this does not seem to be appropriate
370 * for now, however, we need to check if it
371 * is really so; aka Router Reachability Probing.
373 * Router Reachability Probe MUST be rate-limited
374 * to no more than one per minute.
376 if (!neigh
|| (neigh
->nud_state
& NUD_VALID
))
378 read_lock_bh(&neigh
->lock
);
379 if (!(neigh
->nud_state
& NUD_VALID
) &&
380 time_after(jiffies
, neigh
->updated
+ rt
->rt6i_idev
->cnf
.rtr_probe_interval
)) {
381 struct in6_addr mcaddr
;
382 struct in6_addr
*target
;
384 neigh
->updated
= jiffies
;
385 read_unlock_bh(&neigh
->lock
);
387 target
= (struct in6_addr
*)&neigh
->primary_key
;
388 addrconf_addr_solict_mult(target
, &mcaddr
);
389 ndisc_send_ns(rt
->rt6i_dev
, NULL
, target
, &mcaddr
, NULL
);
391 read_unlock_bh(&neigh
->lock
);
394 static inline void rt6_probe(struct rt6_info
*rt
)
400 * Default Router Selection (RFC 2461 6.3.6)
402 static inline int rt6_check_dev(struct rt6_info
*rt
, int oif
)
404 struct net_device
*dev
= rt
->rt6i_dev
;
405 if (!oif
|| dev
->ifindex
== oif
)
407 if ((dev
->flags
& IFF_LOOPBACK
) &&
408 rt
->rt6i_idev
&& rt
->rt6i_idev
->dev
->ifindex
== oif
)
413 static inline int rt6_check_neigh(struct rt6_info
*rt
)
415 struct neighbour
*neigh
= dst_get_neighbour(&rt
->dst
);
417 if (rt
->rt6i_flags
& RTF_NONEXTHOP
||
418 !(rt
->rt6i_flags
& RTF_GATEWAY
))
421 read_lock_bh(&neigh
->lock
);
422 if (neigh
->nud_state
& NUD_VALID
)
424 #ifdef CONFIG_IPV6_ROUTER_PREF
425 else if (neigh
->nud_state
& NUD_FAILED
)
430 read_unlock_bh(&neigh
->lock
);
436 static int rt6_score_route(struct rt6_info
*rt
, int oif
,
441 m
= rt6_check_dev(rt
, oif
);
442 if (!m
&& (strict
& RT6_LOOKUP_F_IFACE
))
444 #ifdef CONFIG_IPV6_ROUTER_PREF
445 m
|= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt
->rt6i_flags
)) << 2;
447 n
= rt6_check_neigh(rt
);
448 if (!n
&& (strict
& RT6_LOOKUP_F_REACHABLE
))
453 static struct rt6_info
*find_match(struct rt6_info
*rt
, int oif
, int strict
,
454 int *mpri
, struct rt6_info
*match
)
458 if (rt6_check_expired(rt
))
461 m
= rt6_score_route(rt
, oif
, strict
);
466 if (strict
& RT6_LOOKUP_F_REACHABLE
)
470 } else if (strict
& RT6_LOOKUP_F_REACHABLE
) {
478 static struct rt6_info
*find_rr_leaf(struct fib6_node
*fn
,
479 struct rt6_info
*rr_head
,
480 u32 metric
, int oif
, int strict
)
482 struct rt6_info
*rt
, *match
;
486 for (rt
= rr_head
; rt
&& rt
->rt6i_metric
== metric
;
487 rt
= rt
->dst
.rt6_next
)
488 match
= find_match(rt
, oif
, strict
, &mpri
, match
);
489 for (rt
= fn
->leaf
; rt
&& rt
!= rr_head
&& rt
->rt6i_metric
== metric
;
490 rt
= rt
->dst
.rt6_next
)
491 match
= find_match(rt
, oif
, strict
, &mpri
, match
);
496 static struct rt6_info
*rt6_select(struct fib6_node
*fn
, int oif
, int strict
)
498 struct rt6_info
*match
, *rt0
;
501 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
502 __func__
, fn
->leaf
, oif
);
506 fn
->rr_ptr
= rt0
= fn
->leaf
;
508 match
= find_rr_leaf(fn
, rt0
, rt0
->rt6i_metric
, oif
, strict
);
511 (strict
& RT6_LOOKUP_F_REACHABLE
)) {
512 struct rt6_info
*next
= rt0
->dst
.rt6_next
;
514 /* no entries matched; do round-robin */
515 if (!next
|| next
->rt6i_metric
!= rt0
->rt6i_metric
)
522 RT6_TRACE("%s() => %p\n",
525 net
= dev_net(rt0
->rt6i_dev
);
526 return match
? match
: net
->ipv6
.ip6_null_entry
;
529 #ifdef CONFIG_IPV6_ROUTE_INFO
530 int rt6_route_rcv(struct net_device
*dev
, u8
*opt
, int len
,
531 const struct in6_addr
*gwaddr
)
533 struct net
*net
= dev_net(dev
);
534 struct route_info
*rinfo
= (struct route_info
*) opt
;
535 struct in6_addr prefix_buf
, *prefix
;
537 unsigned long lifetime
;
540 if (len
< sizeof(struct route_info
)) {
544 /* Sanity check for prefix_len and length */
545 if (rinfo
->length
> 3) {
547 } else if (rinfo
->prefix_len
> 128) {
549 } else if (rinfo
->prefix_len
> 64) {
550 if (rinfo
->length
< 2) {
553 } else if (rinfo
->prefix_len
> 0) {
554 if (rinfo
->length
< 1) {
559 pref
= rinfo
->route_pref
;
560 if (pref
== ICMPV6_ROUTER_PREF_INVALID
)
563 lifetime
= addrconf_timeout_fixup(ntohl(rinfo
->lifetime
), HZ
);
565 if (rinfo
->length
== 3)
566 prefix
= (struct in6_addr
*)rinfo
->prefix
;
568 /* this function is safe */
569 ipv6_addr_prefix(&prefix_buf
,
570 (struct in6_addr
*)rinfo
->prefix
,
572 prefix
= &prefix_buf
;
575 rt
= rt6_get_route_info(net
, prefix
, rinfo
->prefix_len
, gwaddr
,
578 if (rt
&& !lifetime
) {
584 rt
= rt6_add_route_info(net
, prefix
, rinfo
->prefix_len
, gwaddr
, dev
->ifindex
,
587 rt
->rt6i_flags
= RTF_ROUTEINFO
|
588 (rt
->rt6i_flags
& ~RTF_PREF_MASK
) | RTF_PREF(pref
);
591 if (!addrconf_finite_timeout(lifetime
)) {
592 rt
->rt6i_flags
&= ~RTF_EXPIRES
;
594 rt
->rt6i_expires
= jiffies
+ HZ
* lifetime
;
595 rt
->rt6i_flags
|= RTF_EXPIRES
;
597 dst_release(&rt
->dst
);
603 #define BACKTRACK(__net, saddr) \
605 if (rt == __net->ipv6.ip6_null_entry) { \
606 struct fib6_node *pn; \
608 if (fn->fn_flags & RTN_TL_ROOT) \
611 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
612 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
615 if (fn->fn_flags & RTN_RTINFO) \
621 static struct rt6_info
*ip6_pol_route_lookup(struct net
*net
,
622 struct fib6_table
*table
,
623 struct flowi6
*fl6
, int flags
)
625 struct fib6_node
*fn
;
628 read_lock_bh(&table
->tb6_lock
);
629 fn
= fib6_lookup(&table
->tb6_root
, &fl6
->daddr
, &fl6
->saddr
);
632 rt
= rt6_device_match(net
, rt
, &fl6
->saddr
, fl6
->flowi6_oif
, flags
);
633 BACKTRACK(net
, &fl6
->saddr
);
635 dst_use(&rt
->dst
, jiffies
);
636 read_unlock_bh(&table
->tb6_lock
);
641 struct rt6_info
*rt6_lookup(struct net
*net
, const struct in6_addr
*daddr
,
642 const struct in6_addr
*saddr
, int oif
, int strict
)
644 struct flowi6 fl6
= {
648 struct dst_entry
*dst
;
649 int flags
= strict
? RT6_LOOKUP_F_IFACE
: 0;
652 memcpy(&fl6
.saddr
, saddr
, sizeof(*saddr
));
653 flags
|= RT6_LOOKUP_F_HAS_SADDR
;
656 dst
= fib6_rule_lookup(net
, &fl6
, flags
, ip6_pol_route_lookup
);
658 return (struct rt6_info
*) dst
;
665 EXPORT_SYMBOL(rt6_lookup
);
667 /* ip6_ins_rt is called with FREE table->tb6_lock.
668 It takes new route entry, the addition fails by any reason the
669 route is freed. In any case, if caller does not hold it, it may
673 static int __ip6_ins_rt(struct rt6_info
*rt
, struct nl_info
*info
)
676 struct fib6_table
*table
;
678 table
= rt
->rt6i_table
;
679 write_lock_bh(&table
->tb6_lock
);
680 err
= fib6_add(&table
->tb6_root
, rt
, info
);
681 write_unlock_bh(&table
->tb6_lock
);
686 int ip6_ins_rt(struct rt6_info
*rt
)
688 struct nl_info info
= {
689 .nl_net
= dev_net(rt
->rt6i_dev
),
691 return __ip6_ins_rt(rt
, &info
);
694 static struct rt6_info
*rt6_alloc_cow(const struct rt6_info
*ort
,
695 const struct in6_addr
*daddr
,
696 const struct in6_addr
*saddr
)
704 rt
= ip6_rt_copy(ort
, daddr
);
707 struct neighbour
*neigh
;
708 int attempts
= !in_softirq();
710 if (!(rt
->rt6i_flags
&RTF_GATEWAY
)) {
711 if (rt
->rt6i_dst
.plen
!= 128 &&
712 ipv6_addr_equal(&ort
->rt6i_dst
.addr
, daddr
))
713 rt
->rt6i_flags
|= RTF_ANYCAST
;
714 ipv6_addr_copy(&rt
->rt6i_gateway
, daddr
);
717 rt
->rt6i_dst
.plen
= 128;
718 rt
->rt6i_flags
|= RTF_CACHE
;
719 rt
->dst
.flags
|= DST_HOST
;
721 #ifdef CONFIG_IPV6_SUBTREES
722 if (rt
->rt6i_src
.plen
&& saddr
) {
723 ipv6_addr_copy(&rt
->rt6i_src
.addr
, saddr
);
724 rt
->rt6i_src
.plen
= 128;
729 neigh
= ndisc_get_neigh(rt
->rt6i_dev
, &rt
->rt6i_gateway
);
731 struct net
*net
= dev_net(rt
->rt6i_dev
);
732 int saved_rt_min_interval
=
733 net
->ipv6
.sysctl
.ip6_rt_gc_min_interval
;
734 int saved_rt_elasticity
=
735 net
->ipv6
.sysctl
.ip6_rt_gc_elasticity
;
737 if (attempts
-- > 0) {
738 net
->ipv6
.sysctl
.ip6_rt_gc_elasticity
= 1;
739 net
->ipv6
.sysctl
.ip6_rt_gc_min_interval
= 0;
741 ip6_dst_gc(&net
->ipv6
.ip6_dst_ops
);
743 net
->ipv6
.sysctl
.ip6_rt_gc_elasticity
=
745 net
->ipv6
.sysctl
.ip6_rt_gc_min_interval
=
746 saved_rt_min_interval
;
752 "ipv6: Neighbour table overflow.\n");
756 dst_set_neighbour(&rt
->dst
, neigh
);
763 static struct rt6_info
*rt6_alloc_clone(struct rt6_info
*ort
,
764 const struct in6_addr
*daddr
)
766 struct rt6_info
*rt
= ip6_rt_copy(ort
, daddr
);
769 rt
->rt6i_dst
.plen
= 128;
770 rt
->rt6i_flags
|= RTF_CACHE
;
771 rt
->dst
.flags
|= DST_HOST
;
772 dst_set_neighbour(&rt
->dst
, neigh_clone(dst_get_neighbour(&ort
->dst
)));
777 static struct rt6_info
*ip6_pol_route(struct net
*net
, struct fib6_table
*table
, int oif
,
778 struct flowi6
*fl6
, int flags
)
780 struct fib6_node
*fn
;
781 struct rt6_info
*rt
, *nrt
;
785 int reachable
= net
->ipv6
.devconf_all
->forwarding
? 0 : RT6_LOOKUP_F_REACHABLE
;
787 strict
|= flags
& RT6_LOOKUP_F_IFACE
;
790 read_lock_bh(&table
->tb6_lock
);
793 fn
= fib6_lookup(&table
->tb6_root
, &fl6
->daddr
, &fl6
->saddr
);
796 rt
= rt6_select(fn
, oif
, strict
| reachable
);
798 BACKTRACK(net
, &fl6
->saddr
);
799 if (rt
== net
->ipv6
.ip6_null_entry
||
800 rt
->rt6i_flags
& RTF_CACHE
)
804 read_unlock_bh(&table
->tb6_lock
);
806 if (!dst_get_neighbour(&rt
->dst
) && !(rt
->rt6i_flags
& RTF_NONEXTHOP
))
807 nrt
= rt6_alloc_cow(rt
, &fl6
->daddr
, &fl6
->saddr
);
808 else if (!(rt
->dst
.flags
& DST_HOST
))
809 nrt
= rt6_alloc_clone(rt
, &fl6
->daddr
);
813 dst_release(&rt
->dst
);
814 rt
= nrt
? : net
->ipv6
.ip6_null_entry
;
818 err
= ip6_ins_rt(nrt
);
827 * Race condition! In the gap, when table->tb6_lock was
828 * released someone could insert this route. Relookup.
830 dst_release(&rt
->dst
);
839 read_unlock_bh(&table
->tb6_lock
);
841 rt
->dst
.lastuse
= jiffies
;
847 static struct rt6_info
*ip6_pol_route_input(struct net
*net
, struct fib6_table
*table
,
848 struct flowi6
*fl6
, int flags
)
850 return ip6_pol_route(net
, table
, fl6
->flowi6_iif
, fl6
, flags
);
853 void ip6_route_input(struct sk_buff
*skb
)
855 const struct ipv6hdr
*iph
= ipv6_hdr(skb
);
856 struct net
*net
= dev_net(skb
->dev
);
857 int flags
= RT6_LOOKUP_F_HAS_SADDR
;
858 struct flowi6 fl6
= {
859 .flowi6_iif
= skb
->dev
->ifindex
,
862 .flowlabel
= (* (__be32
*) iph
)&IPV6_FLOWINFO_MASK
,
863 .flowi6_mark
= skb
->mark
,
864 .flowi6_proto
= iph
->nexthdr
,
867 if (rt6_need_strict(&iph
->daddr
) && skb
->dev
->type
!= ARPHRD_PIMREG
)
868 flags
|= RT6_LOOKUP_F_IFACE
;
870 skb_dst_set(skb
, fib6_rule_lookup(net
, &fl6
, flags
, ip6_pol_route_input
));
873 static struct rt6_info
*ip6_pol_route_output(struct net
*net
, struct fib6_table
*table
,
874 struct flowi6
*fl6
, int flags
)
876 return ip6_pol_route(net
, table
, fl6
->flowi6_oif
, fl6
, flags
);
879 struct dst_entry
* ip6_route_output(struct net
*net
, const struct sock
*sk
,
884 if ((sk
&& sk
->sk_bound_dev_if
) || rt6_need_strict(&fl6
->daddr
))
885 flags
|= RT6_LOOKUP_F_IFACE
;
887 if (!ipv6_addr_any(&fl6
->saddr
))
888 flags
|= RT6_LOOKUP_F_HAS_SADDR
;
890 flags
|= rt6_srcprefs2flags(inet6_sk(sk
)->srcprefs
);
892 return fib6_rule_lookup(net
, fl6
, flags
, ip6_pol_route_output
);
895 EXPORT_SYMBOL(ip6_route_output
);
897 struct dst_entry
*ip6_blackhole_route(struct net
*net
, struct dst_entry
*dst_orig
)
899 struct rt6_info
*rt
, *ort
= (struct rt6_info
*) dst_orig
;
900 struct dst_entry
*new = NULL
;
902 rt
= dst_alloc(&ip6_dst_blackhole_ops
, ort
->dst
.dev
, 1, 0, 0);
904 memset(&rt
->rt6i_table
, 0, sizeof(*rt
) - sizeof(struct dst_entry
));
909 new->input
= dst_discard
;
910 new->output
= dst_discard
;
912 if (dst_metrics_read_only(&ort
->dst
))
913 new->_metrics
= ort
->dst
._metrics
;
915 dst_copy_metrics(new, &ort
->dst
);
916 rt
->rt6i_idev
= ort
->rt6i_idev
;
918 in6_dev_hold(rt
->rt6i_idev
);
919 rt
->rt6i_expires
= 0;
921 ipv6_addr_copy(&rt
->rt6i_gateway
, &ort
->rt6i_gateway
);
922 rt
->rt6i_flags
= ort
->rt6i_flags
& ~RTF_EXPIRES
;
925 memcpy(&rt
->rt6i_dst
, &ort
->rt6i_dst
, sizeof(struct rt6key
));
926 #ifdef CONFIG_IPV6_SUBTREES
927 memcpy(&rt
->rt6i_src
, &ort
->rt6i_src
, sizeof(struct rt6key
));
933 dst_release(dst_orig
);
934 return new ? new : ERR_PTR(-ENOMEM
);
938 * Destination cache support functions
941 static struct dst_entry
*ip6_dst_check(struct dst_entry
*dst
, u32 cookie
)
945 rt
= (struct rt6_info
*) dst
;
947 if (rt
->rt6i_node
&& (rt
->rt6i_node
->fn_sernum
== cookie
)) {
948 if (rt
->rt6i_peer_genid
!= rt6_peer_genid()) {
950 rt6_bind_peer(rt
, 0);
951 rt
->rt6i_peer_genid
= rt6_peer_genid();
958 static struct dst_entry
*ip6_negative_advice(struct dst_entry
*dst
)
960 struct rt6_info
*rt
= (struct rt6_info
*) dst
;
963 if (rt
->rt6i_flags
& RTF_CACHE
) {
964 if (rt6_check_expired(rt
)) {
976 static void ip6_link_failure(struct sk_buff
*skb
)
980 icmpv6_send(skb
, ICMPV6_DEST_UNREACH
, ICMPV6_ADDR_UNREACH
, 0);
982 rt
= (struct rt6_info
*) skb_dst(skb
);
984 if (rt
->rt6i_flags
&RTF_CACHE
) {
985 dst_set_expires(&rt
->dst
, 0);
986 rt
->rt6i_flags
|= RTF_EXPIRES
;
987 } else if (rt
->rt6i_node
&& (rt
->rt6i_flags
& RTF_DEFAULT
))
988 rt
->rt6i_node
->fn_sernum
= -1;
992 static void ip6_rt_update_pmtu(struct dst_entry
*dst
, u32 mtu
)
994 struct rt6_info
*rt6
= (struct rt6_info
*)dst
;
996 if (mtu
< dst_mtu(dst
) && rt6
->rt6i_dst
.plen
== 128) {
997 rt6
->rt6i_flags
|= RTF_MODIFIED
;
998 if (mtu
< IPV6_MIN_MTU
) {
999 u32 features
= dst_metric(dst
, RTAX_FEATURES
);
1001 features
|= RTAX_FEATURE_ALLFRAG
;
1002 dst_metric_set(dst
, RTAX_FEATURES
, features
);
1004 dst_metric_set(dst
, RTAX_MTU
, mtu
);
1008 static unsigned int ip6_default_advmss(const struct dst_entry
*dst
)
1010 struct net_device
*dev
= dst
->dev
;
1011 unsigned int mtu
= dst_mtu(dst
);
1012 struct net
*net
= dev_net(dev
);
1014 mtu
-= sizeof(struct ipv6hdr
) + sizeof(struct tcphdr
);
1016 if (mtu
< net
->ipv6
.sysctl
.ip6_rt_min_advmss
)
1017 mtu
= net
->ipv6
.sysctl
.ip6_rt_min_advmss
;
1020 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1021 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1022 * IPV6_MAXPLEN is also valid and means: "any MSS,
1023 * rely only on pmtu discovery"
1025 if (mtu
> IPV6_MAXPLEN
- sizeof(struct tcphdr
))
1030 static unsigned int ip6_default_mtu(const struct dst_entry
*dst
)
1032 unsigned int mtu
= IPV6_MIN_MTU
;
1033 struct inet6_dev
*idev
;
1036 idev
= __in6_dev_get(dst
->dev
);
1038 mtu
= idev
->cnf
.mtu6
;
1044 static struct dst_entry
*icmp6_dst_gc_list
;
1045 static DEFINE_SPINLOCK(icmp6_dst_lock
);
1047 struct dst_entry
*icmp6_dst_alloc(struct net_device
*dev
,
1048 struct neighbour
*neigh
,
1049 const struct in6_addr
*addr
)
1051 struct rt6_info
*rt
;
1052 struct inet6_dev
*idev
= in6_dev_get(dev
);
1053 struct net
*net
= dev_net(dev
);
1055 if (unlikely(idev
== NULL
))
1058 rt
= ip6_dst_alloc(&net
->ipv6
.ip6_dst_ops
, dev
, 0);
1059 if (unlikely(rt
== NULL
)) {
1067 neigh
= ndisc_get_neigh(dev
, addr
);
1072 rt
->rt6i_idev
= idev
;
1073 dst_set_neighbour(&rt
->dst
, neigh
);
1074 atomic_set(&rt
->dst
.__refcnt
, 1);
1075 ipv6_addr_copy(&rt
->rt6i_dst
.addr
, addr
);
1076 dst_metric_set(&rt
->dst
, RTAX_HOPLIMIT
, 255);
1077 rt
->dst
.output
= ip6_output
;
1079 spin_lock_bh(&icmp6_dst_lock
);
1080 rt
->dst
.next
= icmp6_dst_gc_list
;
1081 icmp6_dst_gc_list
= &rt
->dst
;
1082 spin_unlock_bh(&icmp6_dst_lock
);
1084 fib6_force_start_gc(net
);
1090 int icmp6_dst_gc(void)
1092 struct dst_entry
*dst
, **pprev
;
1095 spin_lock_bh(&icmp6_dst_lock
);
1096 pprev
= &icmp6_dst_gc_list
;
1098 while ((dst
= *pprev
) != NULL
) {
1099 if (!atomic_read(&dst
->__refcnt
)) {
1108 spin_unlock_bh(&icmp6_dst_lock
);
1113 static void icmp6_clean_all(int (*func
)(struct rt6_info
*rt
, void *arg
),
1116 struct dst_entry
*dst
, **pprev
;
1118 spin_lock_bh(&icmp6_dst_lock
);
1119 pprev
= &icmp6_dst_gc_list
;
1120 while ((dst
= *pprev
) != NULL
) {
1121 struct rt6_info
*rt
= (struct rt6_info
*) dst
;
1122 if (func(rt
, arg
)) {
1129 spin_unlock_bh(&icmp6_dst_lock
);
1132 static int ip6_dst_gc(struct dst_ops
*ops
)
1134 unsigned long now
= jiffies
;
1135 struct net
*net
= container_of(ops
, struct net
, ipv6
.ip6_dst_ops
);
1136 int rt_min_interval
= net
->ipv6
.sysctl
.ip6_rt_gc_min_interval
;
1137 int rt_max_size
= net
->ipv6
.sysctl
.ip6_rt_max_size
;
1138 int rt_elasticity
= net
->ipv6
.sysctl
.ip6_rt_gc_elasticity
;
1139 int rt_gc_timeout
= net
->ipv6
.sysctl
.ip6_rt_gc_timeout
;
1140 unsigned long rt_last_gc
= net
->ipv6
.ip6_rt_last_gc
;
1143 entries
= dst_entries_get_fast(ops
);
1144 if (time_after(rt_last_gc
+ rt_min_interval
, now
) &&
1145 entries
<= rt_max_size
)
1148 net
->ipv6
.ip6_rt_gc_expire
++;
1149 fib6_run_gc(net
->ipv6
.ip6_rt_gc_expire
, net
);
1150 net
->ipv6
.ip6_rt_last_gc
= now
;
1151 entries
= dst_entries_get_slow(ops
);
1152 if (entries
< ops
->gc_thresh
)
1153 net
->ipv6
.ip6_rt_gc_expire
= rt_gc_timeout
>>1;
1155 net
->ipv6
.ip6_rt_gc_expire
-= net
->ipv6
.ip6_rt_gc_expire
>>rt_elasticity
;
1156 return entries
> rt_max_size
;
1159 /* Clean host part of a prefix. Not necessary in radix tree,
1160 but results in cleaner routing tables.
1162 Remove it only when all the things will work!
1165 int ip6_dst_hoplimit(struct dst_entry
*dst
)
1167 int hoplimit
= dst_metric_raw(dst
, RTAX_HOPLIMIT
);
1168 if (hoplimit
== 0) {
1169 struct net_device
*dev
= dst
->dev
;
1170 struct inet6_dev
*idev
;
1173 idev
= __in6_dev_get(dev
);
1175 hoplimit
= idev
->cnf
.hop_limit
;
1177 hoplimit
= dev_net(dev
)->ipv6
.devconf_all
->hop_limit
;
1182 EXPORT_SYMBOL(ip6_dst_hoplimit
);
1188 int ip6_route_add(struct fib6_config
*cfg
)
1191 struct net
*net
= cfg
->fc_nlinfo
.nl_net
;
1192 struct rt6_info
*rt
= NULL
;
1193 struct net_device
*dev
= NULL
;
1194 struct inet6_dev
*idev
= NULL
;
1195 struct fib6_table
*table
;
1198 if (cfg
->fc_dst_len
> 128 || cfg
->fc_src_len
> 128)
1200 #ifndef CONFIG_IPV6_SUBTREES
1201 if (cfg
->fc_src_len
)
1204 if (cfg
->fc_ifindex
) {
1206 dev
= dev_get_by_index(net
, cfg
->fc_ifindex
);
1209 idev
= in6_dev_get(dev
);
1214 if (cfg
->fc_metric
== 0)
1215 cfg
->fc_metric
= IP6_RT_PRIO_USER
;
1217 table
= fib6_new_table(net
, cfg
->fc_table
);
1218 if (table
== NULL
) {
1223 rt
= ip6_dst_alloc(&net
->ipv6
.ip6_dst_ops
, NULL
, DST_NOCOUNT
);
1230 rt
->dst
.obsolete
= -1;
1231 rt
->rt6i_expires
= (cfg
->fc_flags
& RTF_EXPIRES
) ?
1232 jiffies
+ clock_t_to_jiffies(cfg
->fc_expires
) :
1235 if (cfg
->fc_protocol
== RTPROT_UNSPEC
)
1236 cfg
->fc_protocol
= RTPROT_BOOT
;
1237 rt
->rt6i_protocol
= cfg
->fc_protocol
;
1239 addr_type
= ipv6_addr_type(&cfg
->fc_dst
);
1241 if (addr_type
& IPV6_ADDR_MULTICAST
)
1242 rt
->dst
.input
= ip6_mc_input
;
1243 else if (cfg
->fc_flags
& RTF_LOCAL
)
1244 rt
->dst
.input
= ip6_input
;
1246 rt
->dst
.input
= ip6_forward
;
1248 rt
->dst
.output
= ip6_output
;
1250 ipv6_addr_prefix(&rt
->rt6i_dst
.addr
, &cfg
->fc_dst
, cfg
->fc_dst_len
);
1251 rt
->rt6i_dst
.plen
= cfg
->fc_dst_len
;
1252 if (rt
->rt6i_dst
.plen
== 128)
1253 rt
->dst
.flags
|= DST_HOST
;
1255 #ifdef CONFIG_IPV6_SUBTREES
1256 ipv6_addr_prefix(&rt
->rt6i_src
.addr
, &cfg
->fc_src
, cfg
->fc_src_len
);
1257 rt
->rt6i_src
.plen
= cfg
->fc_src_len
;
1260 rt
->rt6i_metric
= cfg
->fc_metric
;
1262 /* We cannot add true routes via loopback here,
1263 they would result in kernel looping; promote them to reject routes
1265 if ((cfg
->fc_flags
& RTF_REJECT
) ||
1266 (dev
&& (dev
->flags
&IFF_LOOPBACK
) && !(addr_type
&IPV6_ADDR_LOOPBACK
)
1267 && !(cfg
->fc_flags
&RTF_LOCAL
))) {
1268 /* hold loopback dev/idev if we haven't done so. */
1269 if (dev
!= net
->loopback_dev
) {
1274 dev
= net
->loopback_dev
;
1276 idev
= in6_dev_get(dev
);
1282 rt
->dst
.output
= ip6_pkt_discard_out
;
1283 rt
->dst
.input
= ip6_pkt_discard
;
1284 rt
->dst
.error
= -ENETUNREACH
;
1285 rt
->rt6i_flags
= RTF_REJECT
|RTF_NONEXTHOP
;
1289 if (cfg
->fc_flags
& RTF_GATEWAY
) {
1290 const struct in6_addr
*gw_addr
;
1293 gw_addr
= &cfg
->fc_gateway
;
1294 ipv6_addr_copy(&rt
->rt6i_gateway
, gw_addr
);
1295 gwa_type
= ipv6_addr_type(gw_addr
);
1297 if (gwa_type
!= (IPV6_ADDR_LINKLOCAL
|IPV6_ADDR_UNICAST
)) {
1298 struct rt6_info
*grt
;
1300 /* IPv6 strictly inhibits using not link-local
1301 addresses as nexthop address.
1302 Otherwise, router will not able to send redirects.
1303 It is very good, but in some (rare!) circumstances
1304 (SIT, PtP, NBMA NOARP links) it is handy to allow
1305 some exceptions. --ANK
1308 if (!(gwa_type
&IPV6_ADDR_UNICAST
))
1311 grt
= rt6_lookup(net
, gw_addr
, NULL
, cfg
->fc_ifindex
, 1);
1313 err
= -EHOSTUNREACH
;
1317 if (dev
!= grt
->rt6i_dev
) {
1318 dst_release(&grt
->dst
);
1322 dev
= grt
->rt6i_dev
;
1323 idev
= grt
->rt6i_idev
;
1325 in6_dev_hold(grt
->rt6i_idev
);
1327 if (!(grt
->rt6i_flags
&RTF_GATEWAY
))
1329 dst_release(&grt
->dst
);
1335 if (dev
== NULL
|| (dev
->flags
&IFF_LOOPBACK
))
1343 if (!ipv6_addr_any(&cfg
->fc_prefsrc
)) {
1344 if (!ipv6_chk_addr(net
, &cfg
->fc_prefsrc
, dev
, 0)) {
1348 ipv6_addr_copy(&rt
->rt6i_prefsrc
.addr
, &cfg
->fc_prefsrc
);
1349 rt
->rt6i_prefsrc
.plen
= 128;
1351 rt
->rt6i_prefsrc
.plen
= 0;
1353 if (cfg
->fc_flags
& (RTF_GATEWAY
| RTF_NONEXTHOP
)) {
1354 struct neighbour
*n
= __neigh_lookup_errno(&nd_tbl
, &rt
->rt6i_gateway
, dev
);
1359 dst_set_neighbour(&rt
->dst
, n
);
1362 rt
->rt6i_flags
= cfg
->fc_flags
;
1369 nla_for_each_attr(nla
, cfg
->fc_mx
, cfg
->fc_mx_len
, remaining
) {
1370 int type
= nla_type(nla
);
1373 if (type
> RTAX_MAX
) {
1378 dst_metric_set(&rt
->dst
, type
, nla_get_u32(nla
));
1384 rt
->rt6i_idev
= idev
;
1385 rt
->rt6i_table
= table
;
1387 cfg
->fc_nlinfo
.nl_net
= dev_net(dev
);
1389 return __ip6_ins_rt(rt
, &cfg
->fc_nlinfo
);
1401 static int __ip6_del_rt(struct rt6_info
*rt
, struct nl_info
*info
)
1404 struct fib6_table
*table
;
1405 struct net
*net
= dev_net(rt
->rt6i_dev
);
1407 if (rt
== net
->ipv6
.ip6_null_entry
)
1410 table
= rt
->rt6i_table
;
1411 write_lock_bh(&table
->tb6_lock
);
1413 err
= fib6_del(rt
, info
);
1414 dst_release(&rt
->dst
);
1416 write_unlock_bh(&table
->tb6_lock
);
1421 int ip6_del_rt(struct rt6_info
*rt
)
1423 struct nl_info info
= {
1424 .nl_net
= dev_net(rt
->rt6i_dev
),
1426 return __ip6_del_rt(rt
, &info
);
1429 static int ip6_route_del(struct fib6_config
*cfg
)
1431 struct fib6_table
*table
;
1432 struct fib6_node
*fn
;
1433 struct rt6_info
*rt
;
1436 table
= fib6_get_table(cfg
->fc_nlinfo
.nl_net
, cfg
->fc_table
);
1440 read_lock_bh(&table
->tb6_lock
);
1442 fn
= fib6_locate(&table
->tb6_root
,
1443 &cfg
->fc_dst
, cfg
->fc_dst_len
,
1444 &cfg
->fc_src
, cfg
->fc_src_len
);
1447 for (rt
= fn
->leaf
; rt
; rt
= rt
->dst
.rt6_next
) {
1448 if (cfg
->fc_ifindex
&&
1449 (rt
->rt6i_dev
== NULL
||
1450 rt
->rt6i_dev
->ifindex
!= cfg
->fc_ifindex
))
1452 if (cfg
->fc_flags
& RTF_GATEWAY
&&
1453 !ipv6_addr_equal(&cfg
->fc_gateway
, &rt
->rt6i_gateway
))
1455 if (cfg
->fc_metric
&& cfg
->fc_metric
!= rt
->rt6i_metric
)
1458 read_unlock_bh(&table
->tb6_lock
);
1460 return __ip6_del_rt(rt
, &cfg
->fc_nlinfo
);
1463 read_unlock_bh(&table
->tb6_lock
);
1471 struct ip6rd_flowi
{
1473 struct in6_addr gateway
;
1476 static struct rt6_info
*__ip6_route_redirect(struct net
*net
,
1477 struct fib6_table
*table
,
1481 struct ip6rd_flowi
*rdfl
= (struct ip6rd_flowi
*)fl6
;
1482 struct rt6_info
*rt
;
1483 struct fib6_node
*fn
;
1486 * Get the "current" route for this destination and
1487 * check if the redirect has come from approriate router.
1489 * RFC 2461 specifies that redirects should only be
1490 * accepted if they come from the nexthop to the target.
1491 * Due to the way the routes are chosen, this notion
1492 * is a bit fuzzy and one might need to check all possible
1496 read_lock_bh(&table
->tb6_lock
);
1497 fn
= fib6_lookup(&table
->tb6_root
, &fl6
->daddr
, &fl6
->saddr
);
1499 for (rt
= fn
->leaf
; rt
; rt
= rt
->dst
.rt6_next
) {
1501 * Current route is on-link; redirect is always invalid.
1503 * Seems, previous statement is not true. It could
1504 * be node, which looks for us as on-link (f.e. proxy ndisc)
1505 * But then router serving it might decide, that we should
1506 * know truth 8)8) --ANK (980726).
1508 if (rt6_check_expired(rt
))
1510 if (!(rt
->rt6i_flags
& RTF_GATEWAY
))
1512 if (fl6
->flowi6_oif
!= rt
->rt6i_dev
->ifindex
)
1514 if (!ipv6_addr_equal(&rdfl
->gateway
, &rt
->rt6i_gateway
))
1520 rt
= net
->ipv6
.ip6_null_entry
;
1521 BACKTRACK(net
, &fl6
->saddr
);
1525 read_unlock_bh(&table
->tb6_lock
);
1530 static struct rt6_info
*ip6_route_redirect(const struct in6_addr
*dest
,
1531 const struct in6_addr
*src
,
1532 const struct in6_addr
*gateway
,
1533 struct net_device
*dev
)
1535 int flags
= RT6_LOOKUP_F_HAS_SADDR
;
1536 struct net
*net
= dev_net(dev
);
1537 struct ip6rd_flowi rdfl
= {
1539 .flowi6_oif
= dev
->ifindex
,
1545 ipv6_addr_copy(&rdfl
.gateway
, gateway
);
1547 if (rt6_need_strict(dest
))
1548 flags
|= RT6_LOOKUP_F_IFACE
;
1550 return (struct rt6_info
*)fib6_rule_lookup(net
, &rdfl
.fl6
,
1551 flags
, __ip6_route_redirect
);
1554 void rt6_redirect(const struct in6_addr
*dest
, const struct in6_addr
*src
,
1555 const struct in6_addr
*saddr
,
1556 struct neighbour
*neigh
, u8
*lladdr
, int on_link
)
1558 struct rt6_info
*rt
, *nrt
= NULL
;
1559 struct netevent_redirect netevent
;
1560 struct net
*net
= dev_net(neigh
->dev
);
1562 rt
= ip6_route_redirect(dest
, src
, saddr
, neigh
->dev
);
1564 if (rt
== net
->ipv6
.ip6_null_entry
) {
1565 if (net_ratelimit())
1566 printk(KERN_DEBUG
"rt6_redirect: source isn't a valid nexthop "
1567 "for redirect target\n");
1572 * We have finally decided to accept it.
1575 neigh_update(neigh
, lladdr
, NUD_STALE
,
1576 NEIGH_UPDATE_F_WEAK_OVERRIDE
|
1577 NEIGH_UPDATE_F_OVERRIDE
|
1578 (on_link
? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER
|
1579 NEIGH_UPDATE_F_ISROUTER
))
1583 * Redirect received -> path was valid.
1584 * Look, redirects are sent only in response to data packets,
1585 * so that this nexthop apparently is reachable. --ANK
1587 dst_confirm(&rt
->dst
);
1589 /* Duplicate redirect: silently ignore. */
1590 if (neigh
== dst_get_neighbour(&rt
->dst
))
1593 nrt
= ip6_rt_copy(rt
, dest
);
1597 nrt
->rt6i_flags
= RTF_GATEWAY
|RTF_UP
|RTF_DYNAMIC
|RTF_CACHE
;
1599 nrt
->rt6i_flags
&= ~RTF_GATEWAY
;
1601 nrt
->rt6i_dst
.plen
= 128;
1602 nrt
->dst
.flags
|= DST_HOST
;
1604 ipv6_addr_copy(&nrt
->rt6i_gateway
, (struct in6_addr
*)neigh
->primary_key
);
1605 dst_set_neighbour(&nrt
->dst
, neigh_clone(neigh
));
1607 if (ip6_ins_rt(nrt
))
1610 netevent
.old
= &rt
->dst
;
1611 netevent
.new = &nrt
->dst
;
1612 call_netevent_notifiers(NETEVENT_REDIRECT
, &netevent
);
1614 if (rt
->rt6i_flags
&RTF_CACHE
) {
1620 dst_release(&rt
->dst
);
1624 * Handle ICMP "packet too big" messages
1625 * i.e. Path MTU discovery
1628 static void rt6_do_pmtu_disc(const struct in6_addr
*daddr
, const struct in6_addr
*saddr
,
1629 struct net
*net
, u32 pmtu
, int ifindex
)
1631 struct rt6_info
*rt
, *nrt
;
1634 rt
= rt6_lookup(net
, daddr
, saddr
, ifindex
, 0);
1638 if (rt6_check_expired(rt
)) {
1643 if (pmtu
>= dst_mtu(&rt
->dst
))
1646 if (pmtu
< IPV6_MIN_MTU
) {
1648 * According to RFC2460, PMTU is set to the IPv6 Minimum Link
1649 * MTU (1280) and a fragment header should always be included
1650 * after a node receiving Too Big message reporting PMTU is
1651 * less than the IPv6 Minimum Link MTU.
1653 pmtu
= IPV6_MIN_MTU
;
1657 /* New mtu received -> path was valid.
1658 They are sent only in response to data packets,
1659 so that this nexthop apparently is reachable. --ANK
1661 dst_confirm(&rt
->dst
);
1663 /* Host route. If it is static, it would be better
1664 not to override it, but add new one, so that
1665 when cache entry will expire old pmtu
1666 would return automatically.
1668 if (rt
->rt6i_flags
& RTF_CACHE
) {
1669 dst_metric_set(&rt
->dst
, RTAX_MTU
, pmtu
);
1671 u32 features
= dst_metric(&rt
->dst
, RTAX_FEATURES
);
1672 features
|= RTAX_FEATURE_ALLFRAG
;
1673 dst_metric_set(&rt
->dst
, RTAX_FEATURES
, features
);
1675 dst_set_expires(&rt
->dst
, net
->ipv6
.sysctl
.ip6_rt_mtu_expires
);
1676 rt
->rt6i_flags
|= RTF_MODIFIED
|RTF_EXPIRES
;
1681 Two cases are possible:
1682 1. It is connected route. Action: COW
1683 2. It is gatewayed route or NONEXTHOP route. Action: clone it.
1685 if (!dst_get_neighbour(&rt
->dst
) && !(rt
->rt6i_flags
& RTF_NONEXTHOP
))
1686 nrt
= rt6_alloc_cow(rt
, daddr
, saddr
);
1688 nrt
= rt6_alloc_clone(rt
, daddr
);
1691 dst_metric_set(&nrt
->dst
, RTAX_MTU
, pmtu
);
1693 u32 features
= dst_metric(&nrt
->dst
, RTAX_FEATURES
);
1694 features
|= RTAX_FEATURE_ALLFRAG
;
1695 dst_metric_set(&nrt
->dst
, RTAX_FEATURES
, features
);
1698 /* According to RFC 1981, detecting PMTU increase shouldn't be
1699 * happened within 5 mins, the recommended timer is 10 mins.
1700 * Here this route expiration time is set to ip6_rt_mtu_expires
1701 * which is 10 mins. After 10 mins the decreased pmtu is expired
1702 * and detecting PMTU increase will be automatically happened.
1704 dst_set_expires(&nrt
->dst
, net
->ipv6
.sysctl
.ip6_rt_mtu_expires
);
1705 nrt
->rt6i_flags
|= RTF_DYNAMIC
|RTF_EXPIRES
;
1710 dst_release(&rt
->dst
);
/*
 * rt6_pmtu_discovery - entry point for ICMPv6 Packet Too Big handling.
 * Applies the reported PMTU twice via rt6_do_pmtu_disc(): once with
 * ifindex 0 (route chosen by normal lookup for future packets) and once
 * for the device that received the Too Big message, covering packets
 * forced out that interface (e.g. SO_BINDTODEVICE) — see the in-line
 * RFC 1981 rationale below.
 * NOTE(review): extraction-damaged text; braces/comment delimiters lost.
 */
1713 void rt6_pmtu_discovery(const struct in6_addr
*daddr
, const struct in6_addr
*saddr
,
1714 struct net_device
*dev
, u32 pmtu
)
1716 struct net
*net
= dev_net(dev
);
1719 * RFC 1981 states that a node "MUST reduce the size of the packets it
1720 * is sending along the path" that caused the Packet Too Big message.
1721 * Since it's not possible in the general case to determine which
1722 * interface was used to send the original packet, we update the MTU
1723 * on the interface that will be used to send future packets. We also
1724 * update the MTU on the interface that received the Packet Too Big in
1725 * case the original packet was forced out that interface with
1726 * SO_BINDTODEVICE or similar. This is the next best thing to the
1727 * correct behaviour, which would be to update the MTU on all
/* Pass 1: ifindex 0 => route selected by ordinary lookup. */
1730 rt6_do_pmtu_disc(daddr
, saddr
, net
, pmtu
, 0);
/* Pass 2: the interface the Too Big message arrived on. */
1731 rt6_do_pmtu_disc(daddr
, saddr
, net
, pmtu
, dev
->ifindex
);
1735 * Misc support functions
1738 static struct rt6_info
*ip6_rt_copy(const struct rt6_info
*ort
,
1739 const struct in6_addr
*dest
)
1741 struct net
*net
= dev_net(ort
->rt6i_dev
);
1742 struct rt6_info
*rt
= ip6_dst_alloc(&net
->ipv6
.ip6_dst_ops
,
1746 rt
->dst
.input
= ort
->dst
.input
;
1747 rt
->dst
.output
= ort
->dst
.output
;
1749 ipv6_addr_copy(&rt
->rt6i_dst
.addr
, dest
);
1750 rt
->rt6i_dst
.plen
= ort
->rt6i_dst
.plen
;
1751 dst_copy_metrics(&rt
->dst
, &ort
->dst
);
1752 rt
->dst
.error
= ort
->dst
.error
;
1753 rt
->rt6i_idev
= ort
->rt6i_idev
;
1755 in6_dev_hold(rt
->rt6i_idev
);
1756 rt
->dst
.lastuse
= jiffies
;
1757 rt
->rt6i_expires
= 0;
1759 ipv6_addr_copy(&rt
->rt6i_gateway
, &ort
->rt6i_gateway
);
1760 rt
->rt6i_flags
= ort
->rt6i_flags
& ~RTF_EXPIRES
;
1761 rt
->rt6i_metric
= 0;
1763 #ifdef CONFIG_IPV6_SUBTREES
1764 memcpy(&rt
->rt6i_src
, &ort
->rt6i_src
, sizeof(struct rt6key
));
1766 memcpy(&rt
->rt6i_prefsrc
, &ort
->rt6i_prefsrc
, sizeof(struct rt6key
));
1767 rt
->rt6i_table
= ort
->rt6i_table
;
1772 #ifdef CONFIG_IPV6_ROUTE_INFO
1773 static struct rt6_info
*rt6_get_route_info(struct net
*net
,
1774 const struct in6_addr
*prefix
, int prefixlen
,
1775 const struct in6_addr
*gwaddr
, int ifindex
)
1777 struct fib6_node
*fn
;
1778 struct rt6_info
*rt
= NULL
;
1779 struct fib6_table
*table
;
1781 table
= fib6_get_table(net
, RT6_TABLE_INFO
);
1785 write_lock_bh(&table
->tb6_lock
);
1786 fn
= fib6_locate(&table
->tb6_root
, prefix
,prefixlen
, NULL
, 0);
1790 for (rt
= fn
->leaf
; rt
; rt
= rt
->dst
.rt6_next
) {
1791 if (rt
->rt6i_dev
->ifindex
!= ifindex
)
1793 if ((rt
->rt6i_flags
& (RTF_ROUTEINFO
|RTF_GATEWAY
)) != (RTF_ROUTEINFO
|RTF_GATEWAY
))
1795 if (!ipv6_addr_equal(&rt
->rt6i_gateway
, gwaddr
))
1801 write_unlock_bh(&table
->tb6_lock
);
1805 static struct rt6_info
*rt6_add_route_info(struct net
*net
,
1806 const struct in6_addr
*prefix
, int prefixlen
,
1807 const struct in6_addr
*gwaddr
, int ifindex
,
1810 struct fib6_config cfg
= {
1811 .fc_table
= RT6_TABLE_INFO
,
1812 .fc_metric
= IP6_RT_PRIO_USER
,
1813 .fc_ifindex
= ifindex
,
1814 .fc_dst_len
= prefixlen
,
1815 .fc_flags
= RTF_GATEWAY
| RTF_ADDRCONF
| RTF_ROUTEINFO
|
1816 RTF_UP
| RTF_PREF(pref
),
1818 .fc_nlinfo
.nlh
= NULL
,
1819 .fc_nlinfo
.nl_net
= net
,
1822 ipv6_addr_copy(&cfg
.fc_dst
, prefix
);
1823 ipv6_addr_copy(&cfg
.fc_gateway
, gwaddr
);
1825 /* We should treat it as a default route if prefix length is 0. */
1827 cfg
.fc_flags
|= RTF_DEFAULT
;
1829 ip6_route_add(&cfg
);
1831 return rt6_get_route_info(net
, prefix
, prefixlen
, gwaddr
, ifindex
);
1835 struct rt6_info
*rt6_get_dflt_router(const struct in6_addr
*addr
, struct net_device
*dev
)
1837 struct rt6_info
*rt
;
1838 struct fib6_table
*table
;
1840 table
= fib6_get_table(dev_net(dev
), RT6_TABLE_DFLT
);
1844 write_lock_bh(&table
->tb6_lock
);
1845 for (rt
= table
->tb6_root
.leaf
; rt
; rt
=rt
->dst
.rt6_next
) {
1846 if (dev
== rt
->rt6i_dev
&&
1847 ((rt
->rt6i_flags
& (RTF_ADDRCONF
| RTF_DEFAULT
)) == (RTF_ADDRCONF
| RTF_DEFAULT
)) &&
1848 ipv6_addr_equal(&rt
->rt6i_gateway
, addr
))
1853 write_unlock_bh(&table
->tb6_lock
);
1857 struct rt6_info
*rt6_add_dflt_router(const struct in6_addr
*gwaddr
,
1858 struct net_device
*dev
,
1861 struct fib6_config cfg
= {
1862 .fc_table
= RT6_TABLE_DFLT
,
1863 .fc_metric
= IP6_RT_PRIO_USER
,
1864 .fc_ifindex
= dev
->ifindex
,
1865 .fc_flags
= RTF_GATEWAY
| RTF_ADDRCONF
| RTF_DEFAULT
|
1866 RTF_UP
| RTF_EXPIRES
| RTF_PREF(pref
),
1868 .fc_nlinfo
.nlh
= NULL
,
1869 .fc_nlinfo
.nl_net
= dev_net(dev
),
1872 ipv6_addr_copy(&cfg
.fc_gateway
, gwaddr
);
1874 ip6_route_add(&cfg
);
1876 return rt6_get_dflt_router(gwaddr
, dev
);
1879 void rt6_purge_dflt_routers(struct net
*net
)
1881 struct rt6_info
*rt
;
1882 struct fib6_table
*table
;
1884 /* NOTE: Keep consistent with rt6_get_dflt_router */
1885 table
= fib6_get_table(net
, RT6_TABLE_DFLT
);
1890 read_lock_bh(&table
->tb6_lock
);
1891 for (rt
= table
->tb6_root
.leaf
; rt
; rt
= rt
->dst
.rt6_next
) {
1892 if (rt
->rt6i_flags
& (RTF_DEFAULT
| RTF_ADDRCONF
)) {
1894 read_unlock_bh(&table
->tb6_lock
);
1899 read_unlock_bh(&table
->tb6_lock
);
/*
 * rtmsg_to_fib6_config - translate a legacy ioctl in6_rtmsg into the
 * internal fib6_config used by ip6_route_add()/ip6_route_del().
 * Zeroes *cfg, copies scalar fields 1:1, forces the main table, and
 * records the caller's netns in fc_nlinfo for later notification.
 * NOTE(review): extraction-damaged text; braces/some lines are missing.
 */
1902 static void rtmsg_to_fib6_config(struct net
*net
,
1903 struct in6_rtmsg
*rtmsg
,
1904 struct fib6_config
*cfg
)
/* Start from a clean config; unset fields mean "default". */
1906 memset(cfg
, 0, sizeof(*cfg
));
/* The legacy ioctl interface only operates on the main table. */
1908 cfg
->fc_table
= RT6_TABLE_MAIN
;
1909 cfg
->fc_ifindex
= rtmsg
->rtmsg_ifindex
;
1910 cfg
->fc_metric
= rtmsg
->rtmsg_metric
;
/* rtmsg_info carries the route lifetime/expiry for this interface. */
1911 cfg
->fc_expires
= rtmsg
->rtmsg_info
;
1912 cfg
->fc_dst_len
= rtmsg
->rtmsg_dst_len
;
1913 cfg
->fc_src_len
= rtmsg
->rtmsg_src_len
;
1914 cfg
->fc_flags
= rtmsg
->rtmsg_flags
;
/* Remember the requesting netns for fib notifications. */
1916 cfg
->fc_nlinfo
.nl_net
= net
;
1918 ipv6_addr_copy(&cfg
->fc_dst
, &rtmsg
->rtmsg_dst
);
1919 ipv6_addr_copy(&cfg
->fc_src
, &rtmsg
->rtmsg_src
);
1920 ipv6_addr_copy(&cfg
->fc_gateway
, &rtmsg
->rtmsg_gateway
);
1923 int ipv6_route_ioctl(struct net
*net
, unsigned int cmd
, void __user
*arg
)
1925 struct fib6_config cfg
;
1926 struct in6_rtmsg rtmsg
;
1930 case SIOCADDRT
: /* Add a route */
1931 case SIOCDELRT
: /* Delete a route */
1932 if (!capable(CAP_NET_ADMIN
))
1934 err
= copy_from_user(&rtmsg
, arg
,
1935 sizeof(struct in6_rtmsg
));
1939 rtmsg_to_fib6_config(net
, &rtmsg
, &cfg
);
1944 err
= ip6_route_add(&cfg
);
1947 err
= ip6_route_del(&cfg
);
1961 * Drop the packet on the floor
/*
 * ip6_pkt_drop - common helper to drop a packet that has no usable route.
 * Bumps the appropriate SNMP counter (input vs. output no-route; packets
 * to the unspecified address count as address errors instead) and sends
 * an ICMPv6 destination-unreachable with the given code back to the
 * sender.
 * NOTE(review): extraction-damaged text — braces, the 'type' declaration,
 * break statements and kfree_skb/return lines are missing from this view.
 */
1964 static int ip6_pkt_drop(struct sk_buff
*skb
, u8 code
, int ipstats_mib_noroutes
)
1967 struct dst_entry
*dst
= skb_dst(skb
);
1968 switch (ipstats_mib_noroutes
) {
1969 case IPSTATS_MIB_INNOROUTES
:
/* Unspecified destination => count as input address error. */
1970 type
= ipv6_addr_type(&ipv6_hdr(skb
)->daddr
);
1971 if (type
== IPV6_ADDR_ANY
) {
1972 IP6_INC_STATS(dev_net(dst
->dev
), ip6_dst_idev(dst
),
1973 IPSTATS_MIB_INADDRERRORS
);
1977 case IPSTATS_MIB_OUTNOROUTES
:
1978 IP6_INC_STATS(dev_net(dst
->dev
), ip6_dst_idev(dst
),
1979 ipstats_mib_noroutes
);
/* Tell the sender why we dropped the packet. */
1982 icmpv6_send(skb
, ICMPV6_DEST_UNREACH
, code
, 0);
/*
 * ip6_pkt_discard - dst input handler for the null route: drop the skb,
 * emitting ICMPv6 "no route" and bumping the input no-route counter.
 */
1987 static int ip6_pkt_discard(struct sk_buff
*skb
)
1989 return ip6_pkt_drop(skb
, ICMPV6_NOROUTE
, IPSTATS_MIB_INNOROUTES
);
/*
 * ip6_pkt_discard_out - dst output handler for the null route.
 * Points skb->dev at the dst's device (output path convention) before
 * dropping via ip6_pkt_drop() with the output no-route counter.
 */
1992 static int ip6_pkt_discard_out(struct sk_buff
*skb
)
1994 skb
->dev
= skb_dst(skb
)->dev
;
1995 return ip6_pkt_drop(skb
, ICMPV6_NOROUTE
, IPSTATS_MIB_OUTNOROUTES
);
1998 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
/*
 * ip6_pkt_prohibit - input handler for the prohibit route (multiple
 * tables only): drop with ICMPv6 "administratively prohibited".
 */
2000 static int ip6_pkt_prohibit(struct sk_buff
*skb
)
2002 return ip6_pkt_drop(skb
, ICMPV6_ADM_PROHIBITED
, IPSTATS_MIB_INNOROUTES
);
/*
 * ip6_pkt_prohibit_out - output-side counterpart of ip6_pkt_prohibit:
 * set skb->dev from the dst, then drop with "administratively
 * prohibited" and the output no-route counter.
 */
2005 static int ip6_pkt_prohibit_out(struct sk_buff
*skb
)
2007 skb
->dev
= skb_dst(skb
)->dev
;
2008 return ip6_pkt_drop(skb
, ICMPV6_ADM_PROHIBITED
, IPSTATS_MIB_OUTNOROUTES
);
2014 * Allocate a dst for local (unicast / anycast) address.
2017 struct rt6_info
*addrconf_dst_alloc(struct inet6_dev
*idev
,
2018 const struct in6_addr
*addr
,
2021 struct net
*net
= dev_net(idev
->dev
);
2022 struct rt6_info
*rt
= ip6_dst_alloc(&net
->ipv6
.ip6_dst_ops
,
2023 net
->loopback_dev
, 0);
2024 struct neighbour
*neigh
;
2027 if (net_ratelimit())
2028 pr_warning("IPv6: Maximum number of routes reached,"
2029 " consider increasing route/max_size.\n");
2030 return ERR_PTR(-ENOMEM
);
2035 rt
->dst
.flags
|= DST_HOST
;
2036 rt
->dst
.input
= ip6_input
;
2037 rt
->dst
.output
= ip6_output
;
2038 rt
->rt6i_idev
= idev
;
2039 rt
->dst
.obsolete
= -1;
2041 rt
->rt6i_flags
= RTF_UP
| RTF_NONEXTHOP
;
2043 rt
->rt6i_flags
|= RTF_ANYCAST
;
2045 rt
->rt6i_flags
|= RTF_LOCAL
;
2046 neigh
= ndisc_get_neigh(rt
->rt6i_dev
, &rt
->rt6i_gateway
);
2047 if (IS_ERR(neigh
)) {
2050 return ERR_CAST(neigh
);
2052 dst_set_neighbour(&rt
->dst
, neigh
);
2054 ipv6_addr_copy(&rt
->rt6i_dst
.addr
, addr
);
2055 rt
->rt6i_dst
.plen
= 128;
2056 rt
->rt6i_table
= fib6_get_table(net
, RT6_TABLE_LOCAL
);
2058 atomic_set(&rt
->dst
.__refcnt
, 1);
/*
 * ip6_route_get_saddr - pick a source address for a route.
 * If the route carries an explicit preferred source (rt6i_prefsrc.plen
 * non-zero) that address is copied out; otherwise fall back to
 * ipv6_dev_get_saddr() on the route's device (NULL device if the dst
 * has no inet6_dev).
 * NOTE(review): extraction-damaged — the 'prefs' parameter line, braces
 * and the final return are missing from this view.
 */
2063 int ip6_route_get_saddr(struct net
*net
,
2064 struct rt6_info
*rt
,
2065 const struct in6_addr
*daddr
,
2067 struct in6_addr
*saddr
)
2069 struct inet6_dev
*idev
= ip6_dst_idev((struct dst_entry
*)rt
);
/* Route has an explicit preferred source: use it directly. */
2071 if (rt
->rt6i_prefsrc
.plen
)
2072 ipv6_addr_copy(saddr
, &rt
->rt6i_prefsrc
.addr
);
/* Otherwise run standard source address selection on the device. */
2074 err
= ipv6_dev_get_saddr(net
, idev
? idev
->dev
: NULL
,
2075 daddr
, prefs
, saddr
);
2079 /* remove deleted ip from prefsrc entries */
2080 struct arg_dev_net_ip
{
2081 struct net_device
*dev
;
2083 struct in6_addr
*addr
;
/*
 * fib6_remove_prefsrc - fib6_clean_all() callback: clear the preferred
 * source of any route whose prefsrc equals the address being removed.
 * Matches routes on the given device (or all devices when dev is NULL),
 * excluding the per-netns null entry. Clearing plen to 0 makes
 * ip6_route_get_saddr() fall back to normal source selection.
 * NOTE(review): extraction-damaged — braces and the return statement(s)
 * are missing from this view.
 */
2086 static int fib6_remove_prefsrc(struct rt6_info
*rt
, void *arg
)
2088 struct net_device
*dev
= ((struct arg_dev_net_ip
*)arg
)->dev
;
2089 struct net
*net
= ((struct arg_dev_net_ip
*)arg
)->net
;
2090 struct in6_addr
*addr
= ((struct arg_dev_net_ip
*)arg
)->addr
;
2092 if (((void *)rt
->rt6i_dev
== dev
|| dev
== NULL
) &&
2093 rt
!= net
->ipv6
.ip6_null_entry
&&
2094 ipv6_addr_equal(addr
, &rt
->rt6i_prefsrc
.addr
)) {
2095 /* remove prefsrc entry */
2096 rt
->rt6i_prefsrc
.plen
= 0;
/*
 * rt6_remove_prefsrc - called when an address is deleted from an
 * interface: walk the whole FIB and strip that address from any route
 * using it as preferred source (via fib6_remove_prefsrc callback).
 * NOTE(review): extraction-damaged — the .net/.addr initializers and
 * braces are missing from this view.
 */
2101 void rt6_remove_prefsrc(struct inet6_ifaddr
*ifp
)
2103 struct net
*net
= dev_net(ifp
->idev
->dev
);
/* Callback argument: device, netns and the address being removed. */
2104 struct arg_dev_net_ip adni
= {
2105 .dev
= ifp
->idev
->dev
,
2109 fib6_clean_all(net
, fib6_remove_prefsrc
, 0, &adni
);
2112 struct arg_dev_net
{
2113 struct net_device
*dev
;
/*
 * fib6_ifdown - fib6_clean_all() callback used on interface shutdown.
 * Selects for deletion every route on the given device (or on any
 * device when dev is NULL), except the per-netns null entry.
 * NOTE(review): extraction-damaged — braces and the return statements
 * (non-zero => delete route) are missing from this view.
 */
2117 static int fib6_ifdown(struct rt6_info
*rt
, void *arg
)
2119 const struct arg_dev_net
*adn
= arg
;
2120 const struct net_device
*dev
= adn
->dev
;
/* dev == NULL acts as a wildcard: purge routes on every device. */
2122 if ((rt
->rt6i_dev
== dev
|| dev
== NULL
) &&
2123 rt
!= adn
->net
->ipv6
.ip6_null_entry
) {
2124 RT6_TRACE("deleted by ifdown %p\n", rt
);
/*
 * rt6_ifdown - purge all routes referencing a device that is going
 * down: sweep the FIB and the ICMPv6 socket dsts with the fib6_ifdown
 * predicate above.
 * NOTE(review): extraction-damaged — the struct initializers and braces
 * are missing from this view.
 */
2130 void rt6_ifdown(struct net
*net
, struct net_device
*dev
)
2132 struct arg_dev_net adn
= {
2137 fib6_clean_all(net
, fib6_ifdown
, 0, &adn
);
/* Also drop cached dsts held by per-cpu ICMPv6 sockets. */
2138 icmp6_clean_all(fib6_ifdown
, &adn
);
2141 struct rt6_mtu_change_arg
2143 struct net_device
*dev
;
2147 static int rt6_mtu_change_route(struct rt6_info
*rt
, void *p_arg
)
2149 struct rt6_mtu_change_arg
*arg
= (struct rt6_mtu_change_arg
*) p_arg
;
2150 struct inet6_dev
*idev
;
2152 /* In IPv6 pmtu discovery is not optional,
2153 so that RTAX_MTU lock cannot disable it.
2154 We still use this lock to block changes
2155 caused by addrconf/ndisc.
2158 idev
= __in6_dev_get(arg
->dev
);
2162 /* For administrative MTU increase, there is no way to discover
2163 IPv6 PMTU increase, so PMTU increase should be updated here.
2164 Since RFC 1981 doesn't include administrative MTU increase
2165 update PMTU increase is a MUST. (i.e. jumbo frame)
2168 If new MTU is less than route PMTU, this new MTU will be the
2169 lowest MTU in the path, update the route PMTU to reflect PMTU
2170 decreases; if new MTU is greater than route PMTU, and the
2171 old MTU is the lowest MTU in the path, update the route PMTU
2172 to reflect the increase. In this case if the other nodes' MTU
2173 also have the lowest MTU, TOO BIG MESSAGE will be lead to
2176 if (rt
->rt6i_dev
== arg
->dev
&&
2177 !dst_metric_locked(&rt
->dst
, RTAX_MTU
) &&
2178 (dst_mtu(&rt
->dst
) >= arg
->mtu
||
2179 (dst_mtu(&rt
->dst
) < arg
->mtu
&&
2180 dst_mtu(&rt
->dst
) == idev
->cnf
.mtu6
))) {
2181 dst_metric_set(&rt
->dst
, RTAX_MTU
, arg
->mtu
);
/*
 * rt6_mtu_change - propagate a device MTU change to the FIB by running
 * rt6_mtu_change_route over every route in the device's netns.
 * NOTE(review): extraction-damaged — the .dev/.mtu initializers and
 * braces are missing from this view.
 */
2186 void rt6_mtu_change(struct net_device
*dev
, unsigned mtu
)
2188 struct rt6_mtu_change_arg arg
= {
2193 fib6_clean_all(dev_net(dev
), rt6_mtu_change_route
, 0, &arg
);
2196 static const struct nla_policy rtm_ipv6_policy
[RTA_MAX
+1] = {
2197 [RTA_GATEWAY
] = { .len
= sizeof(struct in6_addr
) },
2198 [RTA_OIF
] = { .type
= NLA_U32
},
2199 [RTA_IIF
] = { .type
= NLA_U32
},
2200 [RTA_PRIORITY
] = { .type
= NLA_U32
},
2201 [RTA_METRICS
] = { .type
= NLA_NESTED
},
2204 static int rtm_to_fib6_config(struct sk_buff
*skb
, struct nlmsghdr
*nlh
,
2205 struct fib6_config
*cfg
)
2208 struct nlattr
*tb
[RTA_MAX
+1];
2211 err
= nlmsg_parse(nlh
, sizeof(*rtm
), tb
, RTA_MAX
, rtm_ipv6_policy
);
2216 rtm
= nlmsg_data(nlh
);
2217 memset(cfg
, 0, sizeof(*cfg
));
2219 cfg
->fc_table
= rtm
->rtm_table
;
2220 cfg
->fc_dst_len
= rtm
->rtm_dst_len
;
2221 cfg
->fc_src_len
= rtm
->rtm_src_len
;
2222 cfg
->fc_flags
= RTF_UP
;
2223 cfg
->fc_protocol
= rtm
->rtm_protocol
;
2225 if (rtm
->rtm_type
== RTN_UNREACHABLE
)
2226 cfg
->fc_flags
|= RTF_REJECT
;
2228 if (rtm
->rtm_type
== RTN_LOCAL
)
2229 cfg
->fc_flags
|= RTF_LOCAL
;
2231 cfg
->fc_nlinfo
.pid
= NETLINK_CB(skb
).pid
;
2232 cfg
->fc_nlinfo
.nlh
= nlh
;
2233 cfg
->fc_nlinfo
.nl_net
= sock_net(skb
->sk
);
2235 if (tb
[RTA_GATEWAY
]) {
2236 nla_memcpy(&cfg
->fc_gateway
, tb
[RTA_GATEWAY
], 16);
2237 cfg
->fc_flags
|= RTF_GATEWAY
;
2241 int plen
= (rtm
->rtm_dst_len
+ 7) >> 3;
2243 if (nla_len(tb
[RTA_DST
]) < plen
)
2246 nla_memcpy(&cfg
->fc_dst
, tb
[RTA_DST
], plen
);
2250 int plen
= (rtm
->rtm_src_len
+ 7) >> 3;
2252 if (nla_len(tb
[RTA_SRC
]) < plen
)
2255 nla_memcpy(&cfg
->fc_src
, tb
[RTA_SRC
], plen
);
2258 if (tb
[RTA_PREFSRC
])
2259 nla_memcpy(&cfg
->fc_prefsrc
, tb
[RTA_PREFSRC
], 16);
2262 cfg
->fc_ifindex
= nla_get_u32(tb
[RTA_OIF
]);
2264 if (tb
[RTA_PRIORITY
])
2265 cfg
->fc_metric
= nla_get_u32(tb
[RTA_PRIORITY
]);
2267 if (tb
[RTA_METRICS
]) {
2268 cfg
->fc_mx
= nla_data(tb
[RTA_METRICS
]);
2269 cfg
->fc_mx_len
= nla_len(tb
[RTA_METRICS
]);
2273 cfg
->fc_table
= nla_get_u32(tb
[RTA_TABLE
]);
/*
 * inet6_rtm_delroute - RTM_DELROUTE netlink handler: parse the message
 * into a fib6_config and delete the matching route.
 * NOTE(review): extraction-damaged — braces and the error-check after
 * parsing are missing from this view.
 */
2280 static int inet6_rtm_delroute(struct sk_buff
*skb
, struct nlmsghdr
* nlh
, void *arg
)
2282 struct fib6_config cfg
;
2285 err
= rtm_to_fib6_config(skb
, nlh
, &cfg
);
2289 return ip6_route_del(&cfg
);
/*
 * inet6_rtm_newroute - RTM_NEWROUTE netlink handler: parse the message
 * into a fib6_config and add the route.
 * NOTE(review): extraction-damaged — braces and the error-check after
 * parsing are missing from this view.
 */
2292 static int inet6_rtm_newroute(struct sk_buff
*skb
, struct nlmsghdr
* nlh
, void *arg
)
2294 struct fib6_config cfg
;
2297 err
= rtm_to_fib6_config(skb
, nlh
, &cfg
);
2301 return ip6_route_add(&cfg
);
/*
 * rt6_nlmsg_size - worst-case payload size of a route dump message, used
 * to size the skb in inet6_rt_notify(). Sums the rtmsg header plus every
 * attribute rt6_fill_node() may emit; must stay in sync with that
 * function (rt6_fill_node WARNs on -EMSGSIZE if this is too small).
 */
2304 static inline size_t rt6_nlmsg_size(void)
2306 return NLMSG_ALIGN(sizeof(struct rtmsg
))
2307 + nla_total_size(16) /* RTA_SRC */
2308 + nla_total_size(16) /* RTA_DST */
2309 + nla_total_size(16) /* RTA_GATEWAY */
2310 + nla_total_size(16) /* RTA_PREFSRC */
2311 + nla_total_size(4) /* RTA_TABLE */
2312 + nla_total_size(4) /* RTA_IIF */
2313 + nla_total_size(4) /* RTA_OIF */
2314 + nla_total_size(4) /* RTA_PRIORITY */
2315 + RTAX_MAX
* nla_total_size(4) /* RTA_METRICS */
2316 + nla_total_size(sizeof(struct rta_cacheinfo
));
2319 static int rt6_fill_node(struct net
*net
,
2320 struct sk_buff
*skb
, struct rt6_info
*rt
,
2321 struct in6_addr
*dst
, struct in6_addr
*src
,
2322 int iif
, int type
, u32 pid
, u32 seq
,
2323 int prefix
, int nowait
, unsigned int flags
)
2326 struct nlmsghdr
*nlh
;
2330 if (prefix
) { /* user wants prefix routes only */
2331 if (!(rt
->rt6i_flags
& RTF_PREFIX_RT
)) {
2332 /* success since this is not a prefix route */
2337 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*rtm
), flags
);
2341 rtm
= nlmsg_data(nlh
);
2342 rtm
->rtm_family
= AF_INET6
;
2343 rtm
->rtm_dst_len
= rt
->rt6i_dst
.plen
;
2344 rtm
->rtm_src_len
= rt
->rt6i_src
.plen
;
2347 table
= rt
->rt6i_table
->tb6_id
;
2349 table
= RT6_TABLE_UNSPEC
;
2350 rtm
->rtm_table
= table
;
2351 NLA_PUT_U32(skb
, RTA_TABLE
, table
);
2352 if (rt
->rt6i_flags
&RTF_REJECT
)
2353 rtm
->rtm_type
= RTN_UNREACHABLE
;
2354 else if (rt
->rt6i_flags
&RTF_LOCAL
)
2355 rtm
->rtm_type
= RTN_LOCAL
;
2356 else if (rt
->rt6i_dev
&& (rt
->rt6i_dev
->flags
&IFF_LOOPBACK
))
2357 rtm
->rtm_type
= RTN_LOCAL
;
2359 rtm
->rtm_type
= RTN_UNICAST
;
2361 rtm
->rtm_scope
= RT_SCOPE_UNIVERSE
;
2362 rtm
->rtm_protocol
= rt
->rt6i_protocol
;
2363 if (rt
->rt6i_flags
&RTF_DYNAMIC
)
2364 rtm
->rtm_protocol
= RTPROT_REDIRECT
;
2365 else if (rt
->rt6i_flags
& RTF_ADDRCONF
)
2366 rtm
->rtm_protocol
= RTPROT_KERNEL
;
2367 else if (rt
->rt6i_flags
&RTF_DEFAULT
)
2368 rtm
->rtm_protocol
= RTPROT_RA
;
2370 if (rt
->rt6i_flags
&RTF_CACHE
)
2371 rtm
->rtm_flags
|= RTM_F_CLONED
;
2374 NLA_PUT(skb
, RTA_DST
, 16, dst
);
2375 rtm
->rtm_dst_len
= 128;
2376 } else if (rtm
->rtm_dst_len
)
2377 NLA_PUT(skb
, RTA_DST
, 16, &rt
->rt6i_dst
.addr
);
2378 #ifdef CONFIG_IPV6_SUBTREES
2380 NLA_PUT(skb
, RTA_SRC
, 16, src
);
2381 rtm
->rtm_src_len
= 128;
2382 } else if (rtm
->rtm_src_len
)
2383 NLA_PUT(skb
, RTA_SRC
, 16, &rt
->rt6i_src
.addr
);
2386 #ifdef CONFIG_IPV6_MROUTE
2387 if (ipv6_addr_is_multicast(&rt
->rt6i_dst
.addr
)) {
2388 int err
= ip6mr_get_route(net
, skb
, rtm
, nowait
);
2393 goto nla_put_failure
;
2395 if (err
== -EMSGSIZE
)
2396 goto nla_put_failure
;
2401 NLA_PUT_U32(skb
, RTA_IIF
, iif
);
2403 struct in6_addr saddr_buf
;
2404 if (ip6_route_get_saddr(net
, rt
, dst
, 0, &saddr_buf
) == 0)
2405 NLA_PUT(skb
, RTA_PREFSRC
, 16, &saddr_buf
);
2408 if (rt
->rt6i_prefsrc
.plen
) {
2409 struct in6_addr saddr_buf
;
2410 ipv6_addr_copy(&saddr_buf
, &rt
->rt6i_prefsrc
.addr
);
2411 NLA_PUT(skb
, RTA_PREFSRC
, 16, &saddr_buf
);
2414 if (rtnetlink_put_metrics(skb
, dst_metrics_ptr(&rt
->dst
)) < 0)
2415 goto nla_put_failure
;
2417 if (dst_get_neighbour(&rt
->dst
))
2418 NLA_PUT(skb
, RTA_GATEWAY
, 16, &dst_get_neighbour(&rt
->dst
)->primary_key
);
2421 NLA_PUT_U32(skb
, RTA_OIF
, rt
->rt6i_dev
->ifindex
);
2423 NLA_PUT_U32(skb
, RTA_PRIORITY
, rt
->rt6i_metric
);
2425 if (!(rt
->rt6i_flags
& RTF_EXPIRES
))
2427 else if (rt
->rt6i_expires
- jiffies
< INT_MAX
)
2428 expires
= rt
->rt6i_expires
- jiffies
;
2432 if (rtnl_put_cacheinfo(skb
, &rt
->dst
, 0, 0, 0,
2433 expires
, rt
->dst
.error
) < 0)
2434 goto nla_put_failure
;
2436 return nlmsg_end(skb
, nlh
);
2439 nlmsg_cancel(skb
, nlh
);
2443 int rt6_dump_route(struct rt6_info
*rt
, void *p_arg
)
2445 struct rt6_rtnl_dump_arg
*arg
= (struct rt6_rtnl_dump_arg
*) p_arg
;
2448 if (nlmsg_len(arg
->cb
->nlh
) >= sizeof(struct rtmsg
)) {
2449 struct rtmsg
*rtm
= nlmsg_data(arg
->cb
->nlh
);
2450 prefix
= (rtm
->rtm_flags
& RTM_F_PREFIX
) != 0;
2454 return rt6_fill_node(arg
->net
,
2455 arg
->skb
, rt
, NULL
, NULL
, 0, RTM_NEWROUTE
,
2456 NETLINK_CB(arg
->cb
->skb
).pid
, arg
->cb
->nlh
->nlmsg_seq
,
2457 prefix
, 0, NLM_F_MULTI
);
2460 static int inet6_rtm_getroute(struct sk_buff
*in_skb
, struct nlmsghdr
* nlh
, void *arg
)
2462 struct net
*net
= sock_net(in_skb
->sk
);
2463 struct nlattr
*tb
[RTA_MAX
+1];
2464 struct rt6_info
*rt
;
2465 struct sk_buff
*skb
;
2470 err
= nlmsg_parse(nlh
, sizeof(*rtm
), tb
, RTA_MAX
, rtm_ipv6_policy
);
2475 memset(&fl6
, 0, sizeof(fl6
));
2478 if (nla_len(tb
[RTA_SRC
]) < sizeof(struct in6_addr
))
2481 ipv6_addr_copy(&fl6
.saddr
, nla_data(tb
[RTA_SRC
]));
2485 if (nla_len(tb
[RTA_DST
]) < sizeof(struct in6_addr
))
2488 ipv6_addr_copy(&fl6
.daddr
, nla_data(tb
[RTA_DST
]));
2492 iif
= nla_get_u32(tb
[RTA_IIF
]);
2495 fl6
.flowi6_oif
= nla_get_u32(tb
[RTA_OIF
]);
2498 struct net_device
*dev
;
2499 dev
= __dev_get_by_index(net
, iif
);
2506 skb
= alloc_skb(NLMSG_GOODSIZE
, GFP_KERNEL
);
2512 /* Reserve room for dummy headers, this skb can pass
2513 through good chunk of routing engine.
2515 skb_reset_mac_header(skb
);
2516 skb_reserve(skb
, MAX_HEADER
+ sizeof(struct ipv6hdr
));
2518 rt
= (struct rt6_info
*) ip6_route_output(net
, NULL
, &fl6
);
2519 skb_dst_set(skb
, &rt
->dst
);
2521 err
= rt6_fill_node(net
, skb
, rt
, &fl6
.daddr
, &fl6
.saddr
, iif
,
2522 RTM_NEWROUTE
, NETLINK_CB(in_skb
).pid
,
2523 nlh
->nlmsg_seq
, 0, 0, 0);
2529 err
= rtnl_unicast(skb
, net
, NETLINK_CB(in_skb
).pid
);
2534 void inet6_rt_notify(int event
, struct rt6_info
*rt
, struct nl_info
*info
)
2536 struct sk_buff
*skb
;
2537 struct net
*net
= info
->nl_net
;
2542 seq
= info
->nlh
!= NULL
? info
->nlh
->nlmsg_seq
: 0;
2544 skb
= nlmsg_new(rt6_nlmsg_size(), gfp_any());
2548 err
= rt6_fill_node(net
, skb
, rt
, NULL
, NULL
, 0,
2549 event
, info
->pid
, seq
, 0, 0, 0);
2551 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2552 WARN_ON(err
== -EMSGSIZE
);
2556 rtnl_notify(skb
, net
, info
->pid
, RTNLGRP_IPV6_ROUTE
,
2557 info
->nlh
, gfp_any());
2561 rtnl_set_sk_err(net
, RTNLGRP_IPV6_ROUTE
, err
);
2564 static int ip6_route_dev_notify(struct notifier_block
*this,
2565 unsigned long event
, void *data
)
2567 struct net_device
*dev
= (struct net_device
*)data
;
2568 struct net
*net
= dev_net(dev
);
2570 if (event
== NETDEV_REGISTER
&& (dev
->flags
& IFF_LOOPBACK
)) {
2571 net
->ipv6
.ip6_null_entry
->dst
.dev
= dev
;
2572 net
->ipv6
.ip6_null_entry
->rt6i_idev
= in6_dev_get(dev
);
2573 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2574 net
->ipv6
.ip6_prohibit_entry
->dst
.dev
= dev
;
2575 net
->ipv6
.ip6_prohibit_entry
->rt6i_idev
= in6_dev_get(dev
);
2576 net
->ipv6
.ip6_blk_hole_entry
->dst
.dev
= dev
;
2577 net
->ipv6
.ip6_blk_hole_entry
->rt6i_idev
= in6_dev_get(dev
);
2588 #ifdef CONFIG_PROC_FS
2599 static int rt6_info_route(struct rt6_info
*rt
, void *p_arg
)
2601 struct seq_file
*m
= p_arg
;
2602 struct neighbour
*n
;
2604 seq_printf(m
, "%pi6 %02x ", &rt
->rt6i_dst
.addr
, rt
->rt6i_dst
.plen
);
2606 #ifdef CONFIG_IPV6_SUBTREES
2607 seq_printf(m
, "%pi6 %02x ", &rt
->rt6i_src
.addr
, rt
->rt6i_src
.plen
);
2609 seq_puts(m
, "00000000000000000000000000000000 00 ");
2611 n
= dst_get_neighbour(&rt
->dst
);
2613 seq_printf(m
, "%pi6", n
->primary_key
);
2615 seq_puts(m
, "00000000000000000000000000000000");
2617 seq_printf(m
, " %08x %08x %08x %08x %8s\n",
2618 rt
->rt6i_metric
, atomic_read(&rt
->dst
.__refcnt
),
2619 rt
->dst
.__use
, rt
->rt6i_flags
,
2620 rt
->rt6i_dev
? rt
->rt6i_dev
->name
: "");
/*
 * ipv6_route_show - /proc/net/ipv6_route show callback: walk the whole
 * FIB with rt6_info_route, which prints one line per route into the
 * seq_file passed as the walker argument.
 */
2624 static int ipv6_route_show(struct seq_file
*m
, void *v
)
2626 struct net
*net
= (struct net
*)m
->private;
2627 fib6_clean_all(net
, rt6_info_route
, 0, m
);
/*
 * ipv6_route_open - open handler for /proc/net/ipv6_route; binds the
 * seq_file to the inode's netns via single_open_net().
 */
2631 static int ipv6_route_open(struct inode
*inode
, struct file
*file
)
2633 return single_open_net(inode
, file
, ipv6_route_show
);
2636 static const struct file_operations ipv6_route_proc_fops
= {
2637 .owner
= THIS_MODULE
,
2638 .open
= ipv6_route_open
,
2640 .llseek
= seq_lseek
,
2641 .release
= single_release_net
,
/*
 * rt6_stats_seq_show - /proc/net/rt6_stats show callback: print the
 * per-netns IPv6 FIB statistics as seven hex fields (node counts,
 * allocated/entered/cached routes, live dst entries, discarded routes).
 */
2644 static int rt6_stats_seq_show(struct seq_file
*seq
, void *v
)
2646 struct net
*net
= (struct net
*)seq
->private;
2647 seq_printf(seq
, "%04x %04x %04x %04x %04x %04x %04x\n",
2648 net
->ipv6
.rt6_stats
->fib_nodes
,
2649 net
->ipv6
.rt6_stats
->fib_route_nodes
,
2650 net
->ipv6
.rt6_stats
->fib_rt_alloc
,
2651 net
->ipv6
.rt6_stats
->fib_rt_entries
,
2652 net
->ipv6
.rt6_stats
->fib_rt_cache
,
/* Current dst entry count from the dst_ops accounting. */
2653 dst_entries_get_slow(&net
->ipv6
.ip6_dst_ops
),
2654 net
->ipv6
.rt6_stats
->fib_discarded_routes
);
/*
 * rt6_stats_seq_open - open handler for /proc/net/rt6_stats; binds the
 * seq_file to the inode's netns via single_open_net().
 */
2659 static int rt6_stats_seq_open(struct inode
*inode
, struct file
*file
)
2661 return single_open_net(inode
, file
, rt6_stats_seq_show
);
2664 static const struct file_operations rt6_stats_seq_fops
= {
2665 .owner
= THIS_MODULE
,
2666 .open
= rt6_stats_seq_open
,
2668 .llseek
= seq_lseek
,
2669 .release
= single_release_net
,
2671 #endif /* CONFIG_PROC_FS */
2673 #ifdef CONFIG_SYSCTL
/*
 * ipv6_sysctl_rtcache_flush - handler for net.ipv6.route.flush: reads
 * the delay via proc_dointvec, then kicks the fib6 garbage collector.
 * A delay <= 0 forces an immediate full flush (~0UL), otherwise the
 * delay (in the sysctl's units) is passed to fib6_run_gc().
 * NOTE(review): extraction-damaged — the write-only guard, local
 * declarations, braces and return are missing from this view; the netns
 * is smuggled in via ctl->extra1 (set up in ipv6_route_sysctl_init).
 */
2676 int ipv6_sysctl_rtcache_flush(ctl_table
*ctl
, int write
,
2677 void __user
*buffer
, size_t *lenp
, loff_t
*ppos
)
2684 net
= (struct net
*)ctl
->extra1
;
2685 delay
= net
->ipv6
.sysctl
.flush_delay
;
2686 proc_dointvec(ctl
, write
, buffer
, lenp
, ppos
);
2687 fib6_run_gc(delay
<= 0 ? ~0UL : (unsigned long)delay
, net
);
2691 ctl_table ipv6_route_table_template
[] = {
2693 .procname
= "flush",
2694 .data
= &init_net
.ipv6
.sysctl
.flush_delay
,
2695 .maxlen
= sizeof(int),
2697 .proc_handler
= ipv6_sysctl_rtcache_flush
2700 .procname
= "gc_thresh",
2701 .data
= &ip6_dst_ops_template
.gc_thresh
,
2702 .maxlen
= sizeof(int),
2704 .proc_handler
= proc_dointvec
,
2707 .procname
= "max_size",
2708 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_max_size
,
2709 .maxlen
= sizeof(int),
2711 .proc_handler
= proc_dointvec
,
2714 .procname
= "gc_min_interval",
2715 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_gc_min_interval
,
2716 .maxlen
= sizeof(int),
2718 .proc_handler
= proc_dointvec_jiffies
,
2721 .procname
= "gc_timeout",
2722 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_gc_timeout
,
2723 .maxlen
= sizeof(int),
2725 .proc_handler
= proc_dointvec_jiffies
,
2728 .procname
= "gc_interval",
2729 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_gc_interval
,
2730 .maxlen
= sizeof(int),
2732 .proc_handler
= proc_dointvec_jiffies
,
2735 .procname
= "gc_elasticity",
2736 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_gc_elasticity
,
2737 .maxlen
= sizeof(int),
2739 .proc_handler
= proc_dointvec
,
2742 .procname
= "mtu_expires",
2743 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_mtu_expires
,
2744 .maxlen
= sizeof(int),
2746 .proc_handler
= proc_dointvec_jiffies
,
2749 .procname
= "min_adv_mss",
2750 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_min_advmss
,
2751 .maxlen
= sizeof(int),
2753 .proc_handler
= proc_dointvec
,
2756 .procname
= "gc_min_interval_ms",
2757 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_gc_min_interval
,
2758 .maxlen
= sizeof(int),
2760 .proc_handler
= proc_dointvec_ms_jiffies
,
2765 struct ctl_table
* __net_init
ipv6_route_sysctl_init(struct net
*net
)
2767 struct ctl_table
*table
;
2769 table
= kmemdup(ipv6_route_table_template
,
2770 sizeof(ipv6_route_table_template
),
2774 table
[0].data
= &net
->ipv6
.sysctl
.flush_delay
;
2775 table
[0].extra1
= net
;
2776 table
[1].data
= &net
->ipv6
.ip6_dst_ops
.gc_thresh
;
2777 table
[2].data
= &net
->ipv6
.sysctl
.ip6_rt_max_size
;
2778 table
[3].data
= &net
->ipv6
.sysctl
.ip6_rt_gc_min_interval
;
2779 table
[4].data
= &net
->ipv6
.sysctl
.ip6_rt_gc_timeout
;
2780 table
[5].data
= &net
->ipv6
.sysctl
.ip6_rt_gc_interval
;
2781 table
[6].data
= &net
->ipv6
.sysctl
.ip6_rt_gc_elasticity
;
2782 table
[7].data
= &net
->ipv6
.sysctl
.ip6_rt_mtu_expires
;
2783 table
[8].data
= &net
->ipv6
.sysctl
.ip6_rt_min_advmss
;
2784 table
[9].data
= &net
->ipv6
.sysctl
.ip6_rt_gc_min_interval
;
2791 static int __net_init
ip6_route_net_init(struct net
*net
)
2795 memcpy(&net
->ipv6
.ip6_dst_ops
, &ip6_dst_ops_template
,
2796 sizeof(net
->ipv6
.ip6_dst_ops
));
2798 if (dst_entries_init(&net
->ipv6
.ip6_dst_ops
) < 0)
2799 goto out_ip6_dst_ops
;
2801 net
->ipv6
.ip6_null_entry
= kmemdup(&ip6_null_entry_template
,
2802 sizeof(*net
->ipv6
.ip6_null_entry
),
2804 if (!net
->ipv6
.ip6_null_entry
)
2805 goto out_ip6_dst_entries
;
2806 net
->ipv6
.ip6_null_entry
->dst
.path
=
2807 (struct dst_entry
*)net
->ipv6
.ip6_null_entry
;
2808 net
->ipv6
.ip6_null_entry
->dst
.ops
= &net
->ipv6
.ip6_dst_ops
;
2809 dst_init_metrics(&net
->ipv6
.ip6_null_entry
->dst
,
2810 ip6_template_metrics
, true);
2812 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2813 net
->ipv6
.ip6_prohibit_entry
= kmemdup(&ip6_prohibit_entry_template
,
2814 sizeof(*net
->ipv6
.ip6_prohibit_entry
),
2816 if (!net
->ipv6
.ip6_prohibit_entry
)
2817 goto out_ip6_null_entry
;
2818 net
->ipv6
.ip6_prohibit_entry
->dst
.path
=
2819 (struct dst_entry
*)net
->ipv6
.ip6_prohibit_entry
;
2820 net
->ipv6
.ip6_prohibit_entry
->dst
.ops
= &net
->ipv6
.ip6_dst_ops
;
2821 dst_init_metrics(&net
->ipv6
.ip6_prohibit_entry
->dst
,
2822 ip6_template_metrics
, true);
2824 net
->ipv6
.ip6_blk_hole_entry
= kmemdup(&ip6_blk_hole_entry_template
,
2825 sizeof(*net
->ipv6
.ip6_blk_hole_entry
),
2827 if (!net
->ipv6
.ip6_blk_hole_entry
)
2828 goto out_ip6_prohibit_entry
;
2829 net
->ipv6
.ip6_blk_hole_entry
->dst
.path
=
2830 (struct dst_entry
*)net
->ipv6
.ip6_blk_hole_entry
;
2831 net
->ipv6
.ip6_blk_hole_entry
->dst
.ops
= &net
->ipv6
.ip6_dst_ops
;
2832 dst_init_metrics(&net
->ipv6
.ip6_blk_hole_entry
->dst
,
2833 ip6_template_metrics
, true);
2836 net
->ipv6
.sysctl
.flush_delay
= 0;
2837 net
->ipv6
.sysctl
.ip6_rt_max_size
= 4096;
2838 net
->ipv6
.sysctl
.ip6_rt_gc_min_interval
= HZ
/ 2;
2839 net
->ipv6
.sysctl
.ip6_rt_gc_timeout
= 60*HZ
;
2840 net
->ipv6
.sysctl
.ip6_rt_gc_interval
= 30*HZ
;
2841 net
->ipv6
.sysctl
.ip6_rt_gc_elasticity
= 9;
2842 net
->ipv6
.sysctl
.ip6_rt_mtu_expires
= 10*60*HZ
;
2843 net
->ipv6
.sysctl
.ip6_rt_min_advmss
= IPV6_MIN_MTU
- 20 - 40;
2845 #ifdef CONFIG_PROC_FS
2846 proc_net_fops_create(net
, "ipv6_route", 0, &ipv6_route_proc_fops
);
2847 proc_net_fops_create(net
, "rt6_stats", S_IRUGO
, &rt6_stats_seq_fops
);
2849 net
->ipv6
.ip6_rt_gc_expire
= 30*HZ
;
2855 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2856 out_ip6_prohibit_entry
:
2857 kfree(net
->ipv6
.ip6_prohibit_entry
);
2859 kfree(net
->ipv6
.ip6_null_entry
);
2861 out_ip6_dst_entries
:
2862 dst_entries_destroy(&net
->ipv6
.ip6_dst_ops
);
2867 static void __net_exit
ip6_route_net_exit(struct net
*net
)
2869 #ifdef CONFIG_PROC_FS
2870 proc_net_remove(net
, "ipv6_route");
2871 proc_net_remove(net
, "rt6_stats");
2873 kfree(net
->ipv6
.ip6_null_entry
);
2874 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2875 kfree(net
->ipv6
.ip6_prohibit_entry
);
2876 kfree(net
->ipv6
.ip6_blk_hole_entry
);
2878 dst_entries_destroy(&net
->ipv6
.ip6_dst_ops
);
2881 static struct pernet_operations ip6_route_net_ops
= {
2882 .init
= ip6_route_net_init
,
2883 .exit
= ip6_route_net_exit
,
2886 static struct notifier_block ip6_route_dev_notifier
= {
2887 .notifier_call
= ip6_route_dev_notify
,
2891 int __init
ip6_route_init(void)
2896 ip6_dst_ops_template
.kmem_cachep
=
2897 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info
), 0,
2898 SLAB_HWCACHE_ALIGN
, NULL
);
2899 if (!ip6_dst_ops_template
.kmem_cachep
)
2902 ret
= dst_entries_init(&ip6_dst_blackhole_ops
);
2904 goto out_kmem_cache
;
2906 ret
= register_pernet_subsys(&ip6_route_net_ops
);
2908 goto out_dst_entries
;
2910 ip6_dst_blackhole_ops
.kmem_cachep
= ip6_dst_ops_template
.kmem_cachep
;
2912 /* Registering of the loopback is done before this portion of code,
2913 * the loopback reference in rt6_info will not be taken, do it
2914 * manually for init_net */
2915 init_net
.ipv6
.ip6_null_entry
->dst
.dev
= init_net
.loopback_dev
;
2916 init_net
.ipv6
.ip6_null_entry
->rt6i_idev
= in6_dev_get(init_net
.loopback_dev
);
2917 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2918 init_net
.ipv6
.ip6_prohibit_entry
->dst
.dev
= init_net
.loopback_dev
;
2919 init_net
.ipv6
.ip6_prohibit_entry
->rt6i_idev
= in6_dev_get(init_net
.loopback_dev
);
2920 init_net
.ipv6
.ip6_blk_hole_entry
->dst
.dev
= init_net
.loopback_dev
;
2921 init_net
.ipv6
.ip6_blk_hole_entry
->rt6i_idev
= in6_dev_get(init_net
.loopback_dev
);
2925 goto out_register_subsys
;
2931 ret
= fib6_rules_init();
2936 if (__rtnl_register(PF_INET6
, RTM_NEWROUTE
, inet6_rtm_newroute
, NULL
, NULL
) ||
2937 __rtnl_register(PF_INET6
, RTM_DELROUTE
, inet6_rtm_delroute
, NULL
, NULL
) ||
2938 __rtnl_register(PF_INET6
, RTM_GETROUTE
, inet6_rtm_getroute
, NULL
, NULL
))
2939 goto fib6_rules_init
;
2941 ret
= register_netdevice_notifier(&ip6_route_dev_notifier
);
2943 goto fib6_rules_init
;
2949 fib6_rules_cleanup();
2954 out_register_subsys
:
2955 unregister_pernet_subsys(&ip6_route_net_ops
);
2957 dst_entries_destroy(&ip6_dst_blackhole_ops
);
2959 kmem_cache_destroy(ip6_dst_ops_template
.kmem_cachep
);
2963 void ip6_route_cleanup(void)
2965 unregister_netdevice_notifier(&ip6_route_dev_notifier
);
2966 fib6_rules_cleanup();
2969 unregister_pernet_subsys(&ip6_route_net_ops
);
2970 dst_entries_destroy(&ip6_dst_blackhole_ops
);
2971 kmem_cache_destroy(ip6_dst_ops_template
.kmem_cachep
);